diff --git a/CHANGELOG.md b/CHANGELOG.md
index 51b698df7..67aa69663 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,9 @@ New functions `spdk_accel_submit_encrypt` and `spdk_accel_submit_decrypt` were a
 New accel module `dpdk_cryptodev` has been added. It uses DPDK crypto PMD and support encrypt and decrypt operations. New RPC `dpdk_cryptodev_scan_accel_module` has been added to enable this accel module.
 
+New accel module `mlx5` was added. It implements crypto operations and is enabled when SPDK is
+configured with the mlx5_dv RDMA provider and crypto support.
+
 ### bdev
 
 Added RPCs bdev_nvme_start_mdns_discovery, bdev_nvme_get_mdns_discovery_info and
diff --git a/configure b/configure
index 989c51da7..b8bcd43f0 100755
--- a/configure
+++ b/configure
@@ -874,12 +874,16 @@ than or equal to 4.14 will see significantly reduced performance.
 fi
 
 if [ "${CONFIG[RDMA_PROV]}" == "mlx5_dv" ]; then
-	if ! echo -e '#include <spdk/stdinc.h>\n' \
-		'#include <infiniband/mlx5dv.h>\n' \
-		'#include <rdma/rdma_cma.h>\n' \
-		'int main(void) { return rdma_establish(NULL) || ' \
-		'!!IBV_QP_INIT_ATTR_SEND_OPS_FLAGS || !!MLX5_OPCODE_RDMA_WRITE; }\n' \
-		| "${BUILD_CMD[@]}" -lmlx5 -I${rootdir}/include -c - 2> /dev/null; then
+	MLX5_DV_BUILD_BUILD_CMD="
+		#include <infiniband/mlx5dv.h>\n
+		#include <rdma/rdma_cma.h>\n
+		int main(void) { return rdma_establish(NULL) ||\n
+		!!IBV_QP_INIT_ATTR_SEND_OPS_FLAGS || !!MLX5_OPCODE_RDMA_WRITE"
+	if [ "${CONFIG[CRYPTO]}" = "y" ]; then
+		MLX5_DV_BUILD_BUILD_CMD+="|| !!MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK"
+	fi
+	MLX5_DV_BUILD_BUILD_CMD+=";}"
+	if ! echo -e $MLX5_DV_BUILD_BUILD_CMD | "${BUILD_CMD[@]}" -lmlx5 -I${rootdir}/include -c -; then
 		echo "mlx5_dv provider is not supported"
 		exit 1
 	fi
diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md
index 36db786fa..f11e8c1a5 100644
--- a/doc/jsonrpc.md
+++ b/doc/jsonrpc.md
@@ -450,6 +450,7 @@ Example response:
   "dpdk_cryptodev_scan_accel_module",
   "dpdk_cryptodev_set_driver",
   "dpdk_cryptodev_get_driver",
+  "mlx5_scan_accel_module",
   "bdev_virtio_attach_controller",
   "bdev_virtio_scsi_get_devices",
   "bdev_virtio_detach_controller",
@@ -2132,6 +2133,43 @@ Example response:
 }
 ~~~
 
+### mlx5_scan_accel_module {#rpc_mlx5_scan_accel_module}
+
+Enable mlx5 accel offload.
+
+#### Parameters
+
+Name                    | Optional | Type   | Description
+----------------------- | -------- | ------ | -----------
+qp_size                 | Optional | number | Qpair size
+num_requests            | Optional | number | Size of the shared requests pool
+
+#### Example
+
+Example request:
+
+~~~json
+{
+  "jsonrpc": "2.0",
+  "method": "mlx5_scan_accel_module",
+  "id": 1,
+  "params": {
+    "qp_size": 256,
+    "num_requests": 2047
+  }
+}
+~~~
+
+Example response:
+
+~~~json
+{
+  "jsonrpc": "2.0",
+  "id": 1,
+  "result": true
+}
+~~~
+
 ## Block Device Abstraction Layer {#jsonrpc_components_bdev}
 
 ### bdev_set_options {#rpc_bdev_set_options}
diff --git a/include/spdk_internal/mlx5.h b/include/spdk_internal/mlx5.h
new file mode 100644
index 000000000..88e5ef96f
--- /dev/null
+++ b/include/spdk_internal/mlx5.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ */
+
+#ifndef SPDK_MLX5_H
+#define SPDK_MLX5_H
+
+#include <infiniband/mlx5dv.h>
+
+struct spdk_mlx5_crypto_dek;
+struct spdk_mlx5_crypto_keytag;
+
+struct spdk_mlx5_crypto_dek_create_attr {
+	/* Data Encryption Key in binary form */
+	char *dek;
+	/* Length of the dek */
+	size_t dek_len;
+};
+
+/**
+ * Return a NULL-terminated array of devices which support crypto operations on NVIDIA NICs
+ *
+ * \param dev_num The size of the returned array, or 0
+ * \return Array of contexts. This array must be released with \b spdk_mlx5_crypto_devs_release
+ */
+struct ibv_context **spdk_mlx5_crypto_devs_get(int *dev_num);
+
+/**
+ * Releases an array of devices allocated by \b spdk_mlx5_crypto_devs_get
+ *
+ * \param rdma_devs Array of devices to be released
+ */
+void spdk_mlx5_crypto_devs_release(struct ibv_context **rdma_devs);
+
+/**
+ * Create a keytag which contains a DEK for each crypto device in the system
+ *
+ * \param attr Crypto attributes
+ * \param out Keytag
+ * \return 0 on success, negated errno on failure
+ */
+int spdk_mlx5_crypto_keytag_create(struct spdk_mlx5_crypto_dek_create_attr *attr,
+				   struct spdk_mlx5_crypto_keytag **out);
+
+/**
+ * Destroy a keytag created using \b spdk_mlx5_crypto_keytag_create
+ *
+ * \param keytag Keytag pointer
+ */
+void spdk_mlx5_crypto_keytag_destroy(struct spdk_mlx5_crypto_keytag *keytag);
+
+/**
+ * Fills attributes used to register UMR with crypto operation
+ *
+ * \param attr_out Configured UMR attributes
+ * \param keytag Keytag with DEKs
+ * \param pd Protection Domain which is going to be used to register UMR. This function will find a DEK in \b keytag with the same PD
+ * \param block_size Logical block size
+ * \param iv Initialization vector or tweak. Usually this is the logical block address
+ * \param encrypt_on_tx If set, memory data will be encrypted during TX and wire data will be decrypted during RX. If not set, memory data will be decrypted during TX and wire data will be encrypted during RX.
+ * \return 0 on success, negated errno on failure
+ */
+int spdk_mlx5_crypto_set_attr(struct mlx5dv_crypto_attr *attr_out,
+			      struct spdk_mlx5_crypto_keytag *keytag, struct ibv_pd *pd,
+			      uint32_t block_size, uint64_t iv, bool encrypt_on_tx);
+
+#endif /* SPDK_MLX5_H */
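For context, here is a minimal usage sketch of the API declared above (not part of the patch): it creates a keytag from a caller-supplied key1||key2 AES-XTS buffer, fills mlx5dv crypto attributes for a UMR on a given PD, and destroys the keytag. The all-zero 32-byte key and the `pd` argument are illustrative placeholders.

~~~c
#include <infiniband/mlx5dv.h>
#include "spdk_internal/mlx5.h"

static int
crypto_attr_example(struct ibv_pd *pd)
{
	/* key1_128b || key2_128b, 32 bytes total; all-zero key for illustration only */
	char dek[32] = {};
	struct spdk_mlx5_crypto_dek_create_attr dek_attr = {
		.dek = dek,
		.dek_len = sizeof(dek),
	};
	struct spdk_mlx5_crypto_keytag *keytag;
	struct mlx5dv_crypto_attr attr;
	int rc;

	/* Creates one DEK per crypto-capable mlx5 device found in the system */
	rc = spdk_mlx5_crypto_keytag_create(&dek_attr, &keytag);
	if (rc) {
		return rc;
	}

	/* Fill crypto attributes for 512b blocks, tweak (LBA) 0, encrypt on TX;
	 * the DEK registered on this pd is looked up inside the keytag */
	rc = spdk_mlx5_crypto_set_attr(&attr, keytag, pd, 512, 0, true);

	spdk_mlx5_crypto_keytag_destroy(keytag);

	return rc;
}
~~~

In real use the filled attributes would be consumed by `mlx5dv_wr_set_mkey_crypto()` while the keytag is still alive; the keytag is destroyed here only to keep the sketch self-contained.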
diff --git a/lib/Makefile b/lib/Makefile
index 37186bd70..52264f3c5 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,6 +22,9 @@ DIRS-$(CONFIG_VBDEV_COMPRESS) += reduce
 DIRS-$(CONFIG_RDMA) += rdma
 DIRS-$(CONFIG_VFIO_USER) += vfu_tgt
 
+ifeq ($(CONFIG_RDMA_PROV),mlx5_dv)
+DIRS-y += mlx5
+endif
 # If CONFIG_ENV is pointing at a directory in lib, build it.
 # Out-of-tree env implementations must be built separately by the user.
 ENV_NAME := $(notdir $(CONFIG_ENV))
diff --git a/lib/mlx5/Makefile b/lib/mlx5/Makefile
new file mode 100644
index 000000000..511c83215
--- /dev/null
+++ b/lib/mlx5/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+SO_VER := 1
+SO_MINOR := 0
+
+C_SRCS = mlx5_crypto.c
+LIBNAME = mlx5
+
+LOCAL_SYS_LIBS += -lmlx5 -libverbs -lrdmacm
+
+SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_mlx5.map)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
diff --git a/lib/mlx5/mlx5_crypto.c b/lib/mlx5/mlx5_crypto.c
new file mode 100644
index 000000000..9e413ac39
--- /dev/null
+++ b/lib/mlx5/mlx5_crypto.c
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#include <rdma/rdma_cma.h>
+
+#include "spdk/stdinc.h"
+#include "spdk/queue.h"
+#include "spdk/log.h"
+#include "spdk/likely.h"
+#include "spdk/util.h"
+#include "spdk_internal/mlx5.h"
+#include "spdk_internal/rdma.h"
+
+#define MLX5_VENDOR_ID_MELLANOX 0x2c9
+
+/* Plaintext key sizes */
+/* 64b keytag */
+#define SPDK_MLX5_AES_XTS_KEYTAG_SIZE 8
+/* key1_128b + key2_128b */
+#define SPDK_MLX5_AES_XTS_128_DEK_BYTES 32
+/* key1_256b + key2_256b */
+#define SPDK_MLX5_AES_XTS_256_DEK_BYTES 64
+/* key1_128b + key2_128b + 64b_keytag */
+#define SPDK_MLX5_AES_XTS_128_DEK_BYTES_WITH_KEYTAG (SPDK_MLX5_AES_XTS_128_DEK_BYTES + SPDK_MLX5_AES_XTS_KEYTAG_SIZE)
+/* key1_256b + key2_256b + 64b_keytag */
+#define SPDK_MLX5_AES_XTS_256_DEK_BYTES_WITH_KEYTAG (SPDK_MLX5_AES_XTS_256_DEK_BYTES + SPDK_MLX5_AES_XTS_KEYTAG_SIZE)
+
+struct spdk_mlx5_crypto_dek {
+	struct mlx5dv_dek *dek_obj;
+	struct ibv_pd *pd;
+	struct ibv_context *context;
+};
+
+struct spdk_mlx5_crypto_keytag {
+	struct spdk_mlx5_crypto_dek *deks;
+	uint32_t deks_num;
+	bool has_keytag;
+	char keytag[8];
+};
+
+struct ibv_context **
+spdk_mlx5_crypto_devs_get(int *dev_num)
+{
+	struct ibv_context **rdma_devs, **rdma_devs_out = NULL, *dev;
+	struct ibv_device_attr dev_attr;
+	struct mlx5dv_context dv_dev_attr;
+	int num_rdma_devs = 0, i, rc;
+	int num_crypto_devs = 0;
+
+	/* query all devices, save mlx5 with crypto support */
+	rdma_devs = rdma_get_devices(&num_rdma_devs);
+	if (!rdma_devs || !num_rdma_devs) {
+		*dev_num = 0;
+		return NULL;
+	}
+
+	rdma_devs_out = calloc(num_rdma_devs + 1, sizeof(*rdma_devs_out));
+	if (!rdma_devs_out) {
+		SPDK_ERRLOG("Memory allocation failed\n");
+		rdma_free_devices(rdma_devs);
+		*dev_num = 0;
+		return NULL;
+	}
+
+	for (i = 0; i < num_rdma_devs; i++) {
+		dev = rdma_devs[i];
+
+		rc = ibv_query_device(dev, &dev_attr);
+		if (rc) {
+			SPDK_ERRLOG("Failed to query dev %s, skipping\n", dev->device->name);
+			continue;
+		}
+		if (dev_attr.vendor_id != MLX5_VENDOR_ID_MELLANOX) {
+			SPDK_DEBUGLOG(mlx5, "dev %s is not a Mellanox device, skipping\n", dev->device->name);
+			continue;
+		}
+
+		memset(&dv_dev_attr, 0, sizeof(dv_dev_attr));
+		dv_dev_attr.comp_mask |= MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD;
+		rc = mlx5dv_query_device(dev, &dv_dev_attr);
+		if (rc) {
+			SPDK_ERRLOG("Failed to query mlx5 dev %s, skipping\n", dev->device->name);
+			continue;
+		}
+		if (!(dv_dev_attr.crypto_caps.flags & MLX5DV_CRYPTO_CAPS_CRYPTO)) {
+			SPDK_DEBUGLOG(mlx5, "dev %s crypto engine doesn't support crypto, skipping\n", dev->device->name);
+			continue;
+		}
+		if (!(dv_dev_attr.crypto_caps.crypto_engines & (MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS |
+				MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK))) {
+			SPDK_DEBUGLOG(mlx5, "dev %s crypto engine doesn't support AES_XTS, skipping\n", dev->device->name);
+			continue;
+		}
+		if (dv_dev_attr.crypto_caps.wrapped_import_method &
+				MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS) {
+			SPDK_WARNLOG("dev %s uses wrapped import method (0x%x) which is not supported by mlx5 accel module\n",
+				     dev->device->name, dv_dev_attr.crypto_caps.wrapped_import_method);
+			continue;
+		}
+
+		SPDK_NOTICELOG("Crypto dev %s\n", dev->device->name);
+		rdma_devs_out[num_crypto_devs++] = dev;
+	}
+
+	if (!num_crypto_devs) {
+		SPDK_DEBUGLOG(mlx5, "Found no mlx5 crypto devices\n");
+		goto err_out;
+	}
+
+	rdma_free_devices(rdma_devs);
+	*dev_num = num_crypto_devs;
+
+	return rdma_devs_out;
+
+err_out:
+	free(rdma_devs_out);
+	rdma_free_devices(rdma_devs);
+	*dev_num = 0;
+	return NULL;
+}
+
+void
+spdk_mlx5_crypto_devs_release(struct ibv_context **rdma_devs)
+{
+	if (rdma_devs) {
+		free(rdma_devs);
+	}
+}
+
+void
+spdk_mlx5_crypto_keytag_destroy(struct spdk_mlx5_crypto_keytag *keytag)
+{
+	struct spdk_mlx5_crypto_dek *dek;
+	uint32_t i;
+
+	if (!keytag) {
+		return;
+	}
+
+	for (i = 0; i < keytag->deks_num; i++) {
+		dek = &keytag->deks[i];
+		if (dek->dek_obj) {
+			mlx5dv_dek_destroy(dek->dek_obj);
+		}
+		if (dek->pd) {
+			spdk_rdma_put_pd(dek->pd);
+		}
+	}
+	spdk_memset_s(keytag->keytag, sizeof(keytag->keytag), 0, sizeof(keytag->keytag));
+	free(keytag->deks);
+	free(keytag);
+}
+
+int
+spdk_mlx5_crypto_keytag_create(struct spdk_mlx5_crypto_dek_create_attr *attr,
+			       struct spdk_mlx5_crypto_keytag **out)
+{
+	struct spdk_mlx5_crypto_dek *dek;
+	struct spdk_mlx5_crypto_keytag *keytag;
+	struct ibv_context **devs;
+	struct mlx5dv_dek_init_attr init_attr = {};
+	struct mlx5dv_dek_attr query_attr;
+	int num_devs = 0, i, rc;
+	bool has_keytag;
+
+	if (!attr || !attr->dek) {
+		return -EINVAL;
+	}
+	switch (attr->dek_len) {
+	case SPDK_MLX5_AES_XTS_128_DEK_BYTES_WITH_KEYTAG:
+		init_attr.key_size = MLX5DV_CRYPTO_KEY_SIZE_128;
+		has_keytag = true;
+		SPDK_DEBUGLOG(mlx5, "128b AES_XTS with keytag\n");
+		break;
+	case SPDK_MLX5_AES_XTS_256_DEK_BYTES_WITH_KEYTAG:
+		init_attr.key_size = MLX5DV_CRYPTO_KEY_SIZE_256;
+		has_keytag = true;
+		SPDK_DEBUGLOG(mlx5, "256b AES_XTS with keytag\n");
+		break;
+	case SPDK_MLX5_AES_XTS_128_DEK_BYTES:
+		init_attr.key_size = MLX5DV_CRYPTO_KEY_SIZE_128;
+		has_keytag = false;
+		SPDK_DEBUGLOG(mlx5, "128b AES_XTS\n");
+		break;
+	case SPDK_MLX5_AES_XTS_256_DEK_BYTES:
+		init_attr.key_size = MLX5DV_CRYPTO_KEY_SIZE_256;
+		has_keytag = false;
+		SPDK_DEBUGLOG(mlx5, "256b AES_XTS\n");
+		break;
+	default:
+		SPDK_ERRLOG("Invalid key length %zu. The following keys are supported:\n"
+			    "128b key + key2, %u bytes;\n"
+			    "256b key + key2, %u bytes;\n"
+			    "128b key + key2 + keytag, %u bytes;\n"
+			    "256b key + key2 + keytag, %u bytes\n",
+			    attr->dek_len, SPDK_MLX5_AES_XTS_128_DEK_BYTES, SPDK_MLX5_AES_XTS_256_DEK_BYTES,
+			    SPDK_MLX5_AES_XTS_128_DEK_BYTES_WITH_KEYTAG, SPDK_MLX5_AES_XTS_256_DEK_BYTES_WITH_KEYTAG);
+		return -EINVAL;
+	}
+
+	devs = spdk_mlx5_crypto_devs_get(&num_devs);
+	if (!devs || !num_devs) {
+		SPDK_DEBUGLOG(mlx5, "No crypto devices found\n");
+		return -ENOTSUP;
+	}
+
+	keytag = calloc(1, sizeof(*keytag));
+	if (!keytag) {
+		SPDK_ERRLOG("Memory allocation failed\n");
+		spdk_mlx5_crypto_devs_release(devs);
+		return -ENOMEM;
+	}
+	keytag->deks = calloc(num_devs, sizeof(struct spdk_mlx5_crypto_dek));
+	if (!keytag->deks) {
+		SPDK_ERRLOG("Memory allocation failed\n");
+		spdk_mlx5_crypto_devs_release(devs);
+		free(keytag);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_devs; i++) {
+		keytag->deks_num++;
+		dek = &keytag->deks[i];
+		dek->pd = spdk_rdma_get_pd(devs[i]);
+		if (!dek->pd) {
+			SPDK_ERRLOG("Failed to get PD on device %s\n", devs[i]->device->name);
+			rc = -EINVAL;
+			goto err_out;
+		}
+		dek->context = devs[i];
+
+		init_attr.pd = dek->pd;
+		init_attr.has_keytag = has_keytag;
+		init_attr.key_purpose = MLX5DV_CRYPTO_KEY_PURPOSE_AES_XTS;
+		init_attr.comp_mask = MLX5DV_DEK_INIT_ATTR_CRYPTO_LOGIN;
+		init_attr.crypto_login = NULL;
+		memcpy(init_attr.key, attr->dek, attr->dek_len);
+
+		dek->dek_obj = mlx5dv_dek_create(dek->context, &init_attr);
+		spdk_memset_s(init_attr.key, sizeof(init_attr.key), 0, sizeof(init_attr.key));
+		if (!dek->dek_obj) {
+			SPDK_ERRLOG("mlx5dv_dek_create failed on dev %s, errno %d\n", dek->context->device->name, errno);
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		memset(&query_attr, 0, sizeof(query_attr));
+		rc = mlx5dv_dek_query(dek->dek_obj,
&query_attr); + if (rc) { + SPDK_ERRLOG("Failed to query DEK on dev %s, rc %d\n", dek->context->device->name, rc); + goto err_out; + } + if (query_attr.state != MLX5DV_DEK_STATE_READY) { + SPDK_ERRLOG("DEK on dev %s state %d\n", dek->context->device->name, query_attr.state); + rc = -EINVAL; + goto err_out; + } + } + + if (has_keytag) { + /* Save keytag, it will be used to configure crypto MKEY */ + keytag->has_keytag = true; + memcpy(keytag->keytag, attr->dek + attr->dek_len - SPDK_MLX5_AES_XTS_KEYTAG_SIZE, + SPDK_MLX5_AES_XTS_KEYTAG_SIZE); + } + + spdk_mlx5_crypto_devs_release(devs); + *out = keytag; + + return 0; + +err_out: + spdk_mlx5_crypto_keytag_destroy(keytag); + spdk_mlx5_crypto_devs_release(devs); + + return rc; +} + +static inline struct spdk_mlx5_crypto_dek * +mlx5_crypto_get_dek_by_pd(struct spdk_mlx5_crypto_keytag *keytag, struct ibv_pd *pd) +{ + struct spdk_mlx5_crypto_dek *dek; + uint32_t i; + + for (i = 0; i < keytag->deks_num; i++) { + dek = &keytag->deks[i]; + if (dek->pd == pd) { + return dek; + } + } + + return NULL; +} + +int +spdk_mlx5_crypto_set_attr(struct mlx5dv_crypto_attr *attr_out, + struct spdk_mlx5_crypto_keytag *keytag, struct ibv_pd *pd, + uint32_t block_size, uint64_t iv, bool encrypt_on_tx) +{ + struct spdk_mlx5_crypto_dek *dek; + enum mlx5dv_block_size bs; + + dek = mlx5_crypto_get_dek_by_pd(keytag, pd); + if (spdk_unlikely(!dek)) { + SPDK_ERRLOG("No DEK for pd %p (dev %s)\n", pd, pd->context->device->name); + return -EINVAL; + } + + switch (block_size) { + case 512: + bs = MLX5DV_BLOCK_SIZE_512; + break; + case 520: + bs = MLX5DV_BLOCK_SIZE_520; + break; + case 4048: + bs = MLX5DV_BLOCK_SIZE_4048; + break; + case 4096: + bs = MLX5DV_BLOCK_SIZE_4096; + break; + case 4160: + bs = MLX5DV_BLOCK_SIZE_4160; + break; + default: + SPDK_ERRLOG("Unsupported block size %u\n", block_size); + return -EINVAL; + } + + memset(attr_out, 0, sizeof(*attr_out)); + attr_out->dek = dek->dek_obj; + attr_out->crypto_standard = MLX5DV_CRYPTO_STANDARD_AES_XTS; + attr_out->data_unit_size = bs; + attr_out->encrypt_on_tx = encrypt_on_tx; + memcpy(attr_out->initial_tweak, &iv, sizeof(iv)); + if (keytag->has_keytag) { + memcpy(attr_out->keytag, keytag->keytag, sizeof(keytag->keytag)); + } + + return 0; +} + +SPDK_LOG_REGISTER_COMPONENT(mlx5) diff --git a/lib/mlx5/spdk_mlx5.map b/lib/mlx5/spdk_mlx5.map new file mode 100644 index 000000000..55f570915 --- /dev/null +++ b/lib/mlx5/spdk_mlx5.map @@ -0,0 +1,11 @@ +{ + global: + + spdk_mlx5_crypto_devs_get; + spdk_mlx5_crypto_devs_release; + spdk_mlx5_crypto_keytag_create; + spdk_mlx5_crypto_keytag_destroy; + spdk_mlx5_crypto_set_attr; + + local: *; +}; diff --git a/mk/spdk.lib_deps.mk b/mk/spdk.lib_deps.mk index 2fd440500..0261d16ed 100644 --- a/mk/spdk.lib_deps.mk +++ b/mk/spdk.lib_deps.mk @@ -70,6 +70,9 @@ DEPDIRS-nvmf := accel log sock util nvme thread $(JSON_LIBS) trace bdev ifeq ($(CONFIG_RDMA),y) DEPDIRS-nvmf += rdma endif +ifeq ($(CONFIG_RDMA_PROV),mlx5_dv) +DEPDIRS-mlx5 = log rdma util +endif DEPDIRS-scsi := log util thread $(JSON_LIBS) trace bdev DEPDIRS-iscsi := log sock util conf thread $(JSON_LIBS) trace scsi @@ -100,6 +103,10 @@ DEPDIRS-accel_iaa := log idxd thread $(JSON_LIBS) accel trace DEPDIRS-accel_dpdk_cryptodev := log thread $(JSON_LIBS) accel DEPDIRS-accel_dpdk_compressdev := log thread $(JSON_LIBS) accel util +ifeq ($(CONFIG_RDMA_PROV),mlx5_dv) +DEPDIRS-accel_mlx5 := accel thread log mlx5 rdma util +endif + # module/env_dpdk DEPDIRS-env_dpdk_rpc := log $(JSON_LIBS) diff --git a/mk/spdk.modules.mk b/mk/spdk.modules.mk 
index 76c96fcca..edc6fe6af 100644 --- a/mk/spdk.modules.mk +++ b/mk/spdk.modules.mk @@ -108,6 +108,10 @@ ifeq ($(CONFIG_DPDK_COMPRESSDEV),y) ACCEL_MODULES_LIST += accel_dpdk_compressdev endif +ifeq ($(CONFIG_RDMA_PROV)|$(CONFIG_CRYPTO),mlx5_dv|y) +ACCEL_MODULES_LIST += accel_mlx5 +endif + SCHEDULER_MODULES_LIST = scheduler_dynamic ifeq (y,$(DPDK_POWER)) SCHEDULER_MODULES_LIST += env_dpdk scheduler_dpdk_governor scheduler_gscheduler diff --git a/module/accel/Makefile b/module/accel/Makefile index 29958a671..47b6a66c9 100644 --- a/module/accel/Makefile +++ b/module/accel/Makefile @@ -12,6 +12,9 @@ DIRS-$(CONFIG_DPDK_COMPRESSDEV) += dpdk_compressdev DIRS-$(CONFIG_IDXD) += dsa DIRS-$(CONFIG_IDXD) += iaa DIRS-$(CONFIG_CRYPTO) += dpdk_cryptodev +ifeq ($(CONFIG_RDMA_PROV)|$(CONFIG_CRYPTO),mlx5_dv|y) +DIRS-y += mlx5 +endif .PHONY: all clean $(DIRS-y) diff --git a/module/accel/mlx5/Makefile b/module/accel/mlx5/Makefile new file mode 100644 index 000000000..9c88ca343 --- /dev/null +++ b/module/accel/mlx5/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES +# All rights reserved. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +SO_VER := 1 +SO_MINOR := 0 + +LIBNAME = accel_mlx5 +C_SRCS = accel_mlx5.c accel_mlx5_rpc.c + +SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map + +LOCAL_SYS_LIBS += -libverbs -lmlx5 + +include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk diff --git a/module/accel/mlx5/accel_mlx5.c b/module/accel/mlx5/accel_mlx5.c new file mode 100644 index 000000000..e1c94cba3 --- /dev/null +++ b/module/accel/mlx5/accel_mlx5.c @@ -0,0 +1,1182 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */
+
+#include "spdk/env.h"
+#include "spdk/thread.h"
+#include "spdk/queue.h"
+#include "spdk/log.h"
+#include "spdk/string.h"
+#include "spdk/likely.h"
+#include "spdk/dma.h"
+#include "spdk/json.h"
+#include "spdk/util.h"
+
+#include "spdk_internal/mlx5.h"
+#include "spdk_internal/rdma.h"
+#include "spdk_internal/accel_module.h"
+#include "spdk_internal/assert.h"
+#include "spdk_internal/sgl.h"
+#include "accel_mlx5.h"
+
+#include <infiniband/mlx5dv.h>
+#include <rdma/rdma_cma.h>
+
+#define ACCEL_MLX5_QP_SIZE (256u)
+#define ACCEL_MLX5_NUM_REQUESTS (2048u - 1)
+
+#define ACCEL_MLX5_MAX_SGE (16u)
+#define ACCEL_MLX5_MAX_WC (64u)
+#define ACCEL_MLX5_ALLOC_REQS_IN_BATCH (16u)
+
+struct accel_mlx5_io_channel;
+struct accel_mlx5_task;
+
+struct accel_mlx5_crypto_dev_ctx {
+	struct spdk_mempool *requests_pool;
+	struct ibv_context *context;
+	struct ibv_pd *pd;
+	TAILQ_ENTRY(accel_mlx5_crypto_dev_ctx) link;
+};
+
+struct accel_mlx5_module {
+	struct spdk_accel_module_if module;
+	struct accel_mlx5_crypto_dev_ctx *crypto_ctxs;
+	uint32_t num_crypto_ctxs;
+	struct accel_mlx5_attr attr;
+	bool enabled;
+};
+
+enum accel_mlx5_wrid_type {
+	ACCEL_MLX5_WRID_MKEY,
+	ACCEL_MLX5_WRID_WRITE,
+};
+
+struct accel_mlx5_wrid {
+	uint8_t wrid;
+};
+
+struct accel_mlx5_req {
+	struct accel_mlx5_task *task;
+	struct mlx5dv_mkey *mkey;
+	struct ibv_sge src_sg[ACCEL_MLX5_MAX_SGE];
+	struct ibv_sge dst_sg[ACCEL_MLX5_MAX_SGE];
+	uint16_t src_sg_count;
+	uint16_t dst_sg_count;
+	struct accel_mlx5_wrid mkey_wrid;
+	struct accel_mlx5_wrid write_wrid;
+	TAILQ_ENTRY(accel_mlx5_req) link;
+};
+
+struct accel_mlx5_task {
+	struct spdk_accel_task base;
+	struct accel_mlx5_dev *dev;
+	TAILQ_HEAD(, accel_mlx5_req) reqs;
+	uint32_t num_reqs;
+	uint32_t num_completed_reqs;
+	uint32_t num_submitted_reqs;
+	int rc;
+	struct spdk_iov_sgl src;
+	struct spdk_iov_sgl dst;
+	struct accel_mlx5_req *cur_req;
+	/* If set, memory data will be encrypted during TX and wire data will be
+	   decrypted during RX.
+	   If not set, memory data will be decrypted during TX and wire data will
+	   be encrypted during RX. */
+	bool encrypt_on_tx;
+	bool inplace;
+	TAILQ_ENTRY(accel_mlx5_task) link;
+};
+
+struct accel_mlx5_qp {
+	struct ibv_qp *qp;
+	struct ibv_qp_ex *qpex;
+	struct mlx5dv_qp_ex *mqpx; /* more qpairs to the god of qpairs */
+	struct ibv_cq *cq;
+	struct accel_mlx5_io_channel *ch;
+	bool wr_started;
+	uint16_t num_reqs;
+	uint16_t num_free_reqs;
+};
+
+struct accel_mlx5_dev {
+	struct accel_mlx5_qp *qp;
+	struct ibv_cq *cq;
+	struct spdk_rdma_mem_map *mmap;
+	struct accel_mlx5_crypto_dev_ctx *dev_ctx;
+	uint32_t reqs_submitted;
+	uint32_t max_reqs;
+	/* Pending tasks waiting for requests resources */
+	TAILQ_HEAD(, accel_mlx5_task) nomem;
+	/* tasks submitted to HW.
We can't complete a task even in error case until we reap completions for all + * submitted requests */ + TAILQ_HEAD(, accel_mlx5_task) in_hw; + /* tasks between wr_start and wr_complete */ + TAILQ_HEAD(, accel_mlx5_task) before_submit; + TAILQ_ENTRY(accel_mlx5_dev) link; +}; + +struct accel_mlx5_io_channel { + struct accel_mlx5_dev *devs; + struct spdk_poller *poller; + uint32_t num_devs; + /* Index in \b devs to be used for crypto in round-robin way */ + uint32_t dev_idx; +}; + +struct accel_mlx5_req_init_ctx { + struct ibv_pd *pd; + int rc; +}; + +static struct accel_mlx5_module g_accel_mlx5; + +static int +mlx5_qp_init_2_rts(struct ibv_qp *qp, uint32_t dest_qp_num) +{ + struct ibv_qp_attr cur_attr = {}, attr = {}; + struct ibv_qp_init_attr init_attr = {}; + struct ibv_port_attr port_attr = {}; + union ibv_gid gid = {}; + int rc; + uint8_t port; + int attr_mask = IBV_QP_PKEY_INDEX | + IBV_QP_PORT | + IBV_QP_ACCESS_FLAGS | + IBV_QP_PATH_MTU | + IBV_QP_AV | + IBV_QP_DEST_QPN | + IBV_QP_RQ_PSN | + IBV_QP_MAX_DEST_RD_ATOMIC | + IBV_QP_MIN_RNR_TIMER | + IBV_QP_TIMEOUT | + IBV_QP_RETRY_CNT | + IBV_QP_RNR_RETRY | + IBV_QP_SQ_PSN | + IBV_QP_MAX_QP_RD_ATOMIC; + + if (!qp) { + return -EINVAL; + } + + rc = ibv_query_qp(qp, &cur_attr, attr_mask, &init_attr); + if (rc) { + SPDK_ERRLOG("Failed to query qp %p %u\n", qp, qp->qp_num); + return rc; + } + + port = cur_attr.port_num; + rc = ibv_query_port(qp->context, port, &port_attr); + if (rc) { + SPDK_ERRLOG("Failed to query port num %d\n", port); + return rc; + } + + if (port_attr.state != IBV_PORT_ARMED && port_attr.state != IBV_PORT_ACTIVE) { + SPDK_ERRLOG("Wrong port %d state %d\n", port, port_attr.state); + return -ENETUNREACH; + } + + rc = ibv_query_gid(qp->context, port, 0, &gid); + if (rc) { + SPDK_ERRLOG("Failed to get GID on port %d, rc %d\n", port, rc); + return rc; + } + + attr.qp_state = IBV_QPS_INIT; + attr.pkey_index = cur_attr.pkey_index; + attr.port_num = cur_attr.port_num; + attr.qp_access_flags = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE; + attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS; + + rc = ibv_modify_qp(qp, &attr, attr_mask); + if (rc) { + SPDK_ERRLOG("Failed to modify qp %p %u to INIT state, rc %d\n", qp, qp->qp_num, rc); + return rc; + } + + attr.qp_state = IBV_QPS_RTR; + attr.path_mtu = cur_attr.path_mtu; + /* dest_qp_num == qp_num - self loopback connection */ + attr.dest_qp_num = dest_qp_num; + attr.rq_psn = cur_attr.rq_psn; + attr.max_dest_rd_atomic = cur_attr.max_dest_rd_atomic; + attr.min_rnr_timer = cur_attr.min_rnr_timer; + attr.ah_attr = cur_attr.ah_attr; + attr.ah_attr.dlid = port_attr.lid; + attr.ah_attr.sl = 0; + attr.ah_attr.src_path_bits = 0; + + if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) { + /* Ethernet requires to set GRH */ + attr.ah_attr.is_global = 1; + attr.ah_attr.grh.hop_limit = 1; + attr.ah_attr.grh.dgid = gid; + } else { + attr.ah_attr.is_global = 0; + } + + assert(attr.ah_attr.port_num == port); + + attr_mask = IBV_QP_STATE | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | + IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER | IBV_QP_AV; + + rc = ibv_modify_qp(qp, &attr, attr_mask); + if (rc) { + SPDK_ERRLOG("Failed to modify qp %p %u to RTR state, rc %d\n", qp, qp->qp_num, rc); + return rc; + } + + memset(&attr, 0, sizeof(attr)); + attr.qp_state = IBV_QPS_RTS; + attr.timeout = cur_attr.timeout; + attr.retry_cnt = cur_attr.retry_cnt; + attr.sq_psn = cur_attr.sq_psn; + attr.rnr_retry = cur_attr.rnr_retry; + 
attr.max_rd_atomic = cur_attr.max_rd_atomic; + attr_mask = IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_SQ_PSN | IBV_QP_RNR_RETRY | + IBV_QP_MAX_QP_RD_ATOMIC; + + rc = ibv_modify_qp(qp, &attr, attr_mask); + if (rc) { + SPDK_ERRLOG("Failed to modify qp %p %u to RTS state, rc %d\n", qp, qp->qp_num, rc); + return rc; + } + + return 0; +} + +static inline enum ibv_qp_state +accel_mlx5_get_qp_state(struct ibv_qp *qp) { + struct ibv_qp_attr qp_attr; + struct ibv_qp_init_attr init_attr; + + ibv_query_qp(qp, &qp_attr, IBV_QP_STATE, &init_attr); + + return qp_attr.qp_state; +} + +static inline void +accel_mlx5_task_complete(struct accel_mlx5_task *task) +{ + struct accel_mlx5_req *req; + + assert(task->num_reqs == task->num_completed_reqs); + SPDK_DEBUGLOG(accel_mlx5, "Complete task %p, opc %d\n", task, task->base.op_code); + + TAILQ_FOREACH(req, &task->reqs, link) { + spdk_mempool_put(task->dev->dev_ctx->requests_pool, req); + } + spdk_accel_task_complete(&task->base, task->rc); +} + +static inline int +accel_mlx5_flush_wrs(struct accel_mlx5_dev *dev) +{ + struct accel_mlx5_task *task; + struct accel_mlx5_qp *qp = dev->qp; + int rc; + + if (spdk_unlikely(!qp->wr_started)) { + return 0; + } + + SPDK_DEBUGLOG(accel_mlx5, "Completing WRs on dev %s\n", dev->dev_ctx->context->device->name); + rc = ibv_wr_complete(qp->qpex); + if (spdk_unlikely(rc)) { + SPDK_ERRLOG("ibv_wr_complete rc %d\n", rc); + /* Complete all affected requests */ + TAILQ_FOREACH(task, &dev->before_submit, link) { + task->rc = rc; + accel_mlx5_task_complete(task); + } + TAILQ_INIT(&dev->before_submit); + } else { + TAILQ_CONCAT(&dev->in_hw, &dev->before_submit, link); + } + + qp->wr_started = false; + + return rc; +} + +static inline int +accel_mlx5_fill_block_sge(struct accel_mlx5_req *req, struct ibv_sge *sge, + struct spdk_iov_sgl *iovs) +{ + struct spdk_rdma_memory_translation translation; + void *addr; + uint32_t remaining = req->task->base.block_size; + uint32_t size; + int i = 0; + int rc; + + while (remaining) { + size = spdk_min(remaining, iovs->iov->iov_len - iovs->iov_offset); + addr = (void *)iovs->iov->iov_base + iovs->iov_offset; + rc = spdk_rdma_get_translation(req->task->dev->mmap, addr, size, &translation); + if (spdk_unlikely(rc)) { + SPDK_ERRLOG("Memory translation failed, addr %p, length %u\n", addr, size); + return rc; + } + spdk_iov_sgl_advance(iovs, size); + sge[i].lkey = spdk_rdma_memory_translation_get_lkey(&translation); + sge[i].addr = (uint64_t)addr; + sge[i].length = size; + i++; + assert(remaining >= size); + remaining -= size; + } + + return i; +} + +static inline bool +accel_mlx5_compare_iovs(struct iovec *v1, struct iovec *v2, uint32_t iovcnt) +{ + uint32_t i; + + for (i = 0; i < iovcnt; i++) { + if (v1[i].iov_base != v2[i].iov_base || v1[i].iov_len != v2[i].iov_len) { + return false; + } + } + + return true; +} + +static inline uint32_t +accel_mlx5_task_alloc_reqs(struct accel_mlx5_task *task) +{ + struct accel_mlx5_req *reqs_tmp[ACCEL_MLX5_ALLOC_REQS_IN_BATCH], *req; + uint32_t i, num_reqs, allocated_reqs = 0; + uint32_t remaining_reqs = task->num_reqs - task->num_completed_reqs; + uint32_t qp_slot = task->dev->max_reqs - task->dev->reqs_submitted; + int rc; + + assert(task->num_reqs >= task->num_completed_reqs); + remaining_reqs = spdk_min(remaining_reqs, qp_slot); + + while (remaining_reqs) { + num_reqs = spdk_min(ACCEL_MLX5_ALLOC_REQS_IN_BATCH, remaining_reqs); + rc = spdk_mempool_get_bulk(task->dev->dev_ctx->requests_pool, (void **)reqs_tmp, num_reqs); + if (spdk_unlikely(rc)) { + 
return allocated_reqs;
+		}
+		for (i = 0; i < num_reqs; i++) {
+			req = reqs_tmp[i];
+			req->src_sg_count = 0;
+			req->dst_sg_count = 0;
+			req->task = task;
+			TAILQ_INSERT_TAIL(&task->reqs, req, link);
+		}
+		allocated_reqs += num_reqs;
+		remaining_reqs -= num_reqs;
+	}
+
+	return allocated_reqs;
+}
+
+static inline int
+accel_mlx5_task_process(struct accel_mlx5_task *mlx5_task)
+{
+	struct spdk_accel_task *task = &mlx5_task->base;
+	struct accel_mlx5_dev *dev = mlx5_task->dev;
+	struct accel_mlx5_qp *qp = dev->qp;
+	struct ibv_qp_ex *qpx = qp->qpex;
+	struct mlx5dv_qp_ex *mqpx = qp->mqpx;
+	struct mlx5dv_mkey_conf_attr mkey_attr = {};
+	struct mlx5dv_crypto_attr cattr;
+	struct accel_mlx5_req *req;
+	uint64_t iv;
+	uint32_t num_setters = 3; /* access flags, layout, crypto */
+	int rc;
+
+	iv = task->iv + mlx5_task->num_completed_reqs;
+
+	if (!qp->wr_started) {
+		ibv_wr_start(qpx);
+		qp->wr_started = true;
+	}
+
+	SPDK_DEBUGLOG(accel_mlx5, "begin, task, %p, reqs: total %u, submitted %u, completed %u\n",
+		      mlx5_task, mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
+
+	while (mlx5_task->cur_req && dev->reqs_submitted < dev->max_reqs) {
+		req = mlx5_task->cur_req;
+		rc = accel_mlx5_fill_block_sge(req, req->src_sg, &mlx5_task->src);
+		if (spdk_unlikely(rc <= 0)) {
+			if (rc == 0) {
+				rc = -EINVAL;
+			}
+			SPDK_ERRLOG("failed to set src sge, rc %d\n", rc);
+			mlx5_task->rc = rc;
+			goto err_out;
+		}
+		req->src_sg_count = rc;
+
+		/* prepare memory key - destination for WRITE operation */
+		qpx->wr_flags = IBV_SEND_INLINE;
+		qpx->wr_id = (uint64_t)&req->mkey_wrid;
+		mlx5dv_wr_mkey_configure(mqpx, req->mkey, num_setters, &mkey_attr);
+		mlx5dv_wr_set_mkey_access_flags(mqpx,
+						IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ);
+		if (mlx5_task->inplace) {
+			mlx5dv_wr_set_mkey_layout_list(mqpx, req->src_sg_count, req->src_sg);
+		} else {
+			rc = accel_mlx5_fill_block_sge(req, req->dst_sg, &mlx5_task->dst);
+			if (spdk_unlikely(rc <= 0)) {
+				if (rc == 0) {
+					rc = -EINVAL;
+				}
+				SPDK_ERRLOG("failed to set dst sge, rc %d\n", rc);
+				mlx5_task->rc = rc;
+				goto err_out;
+			}
+			req->dst_sg_count = rc;
+			mlx5dv_wr_set_mkey_layout_list(mqpx, req->dst_sg_count, req->dst_sg);
+		}
+		SPDK_DEBUGLOG(accel_mlx5, "req %p, task %p crypto_attr: bs %u, iv %"PRIu64", enc_on_tx %d\n",
+			      req, req->task, task->block_size, iv, mlx5_task->encrypt_on_tx);
+		rc = spdk_mlx5_crypto_set_attr(&cattr, task->crypto_key->priv, dev->dev_ctx->pd, task->block_size,
+					       iv++, mlx5_task->encrypt_on_tx);
+		if (spdk_unlikely(rc)) {
+			SPDK_ERRLOG("failed to set crypto attr, rc %d\n", rc);
+			mlx5_task->rc = rc;
+			goto err_out;
+		}
+		mlx5dv_wr_set_mkey_crypto(mqpx, &cattr);
+
+		/* Prepare WRITE, use rkey from mkey, remote addr is always 0 - start of the mkey */
+		qpx->wr_flags = IBV_SEND_SIGNALED;
+		qpx->wr_id = (uint64_t)&req->write_wrid;
+		ibv_wr_rdma_write(qpx, req->mkey->rkey, 0);
+		/* local buffers, SG is already filled */
+		ibv_wr_set_sge_list(qpx, req->src_sg_count, req->src_sg);
+
+		mlx5_task->num_submitted_reqs++;
+		assert(mlx5_task->num_submitted_reqs <= mlx5_task->num_reqs);
+		dev->reqs_submitted++;
+		mlx5_task->cur_req = TAILQ_NEXT(mlx5_task->cur_req, link);
+	}
+
+	SPDK_DEBUGLOG(accel_mlx5, "end, task, %p, reqs: total %u, submitted %u, completed %u\n", mlx5_task,
+		      mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
+
+	TAILQ_INSERT_TAIL(&dev->before_submit, mlx5_task, link);
+
+	return 0;
+
+err_out:
+	/* Abort all WRs submitted since last wr_start */
+	ibv_wr_abort(qpx);
+	accel_mlx5_task_complete(mlx5_task);
+	TAILQ_FOREACH(mlx5_task, &dev->before_submit, link) {
+		mlx5_task->rc = rc;
+		accel_mlx5_task_complete(mlx5_task);
+	}
+	TAILQ_INIT(&dev->before_submit);
+
+	return rc;
+}
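+
+/* Retry path: return this task's requests to the pool, then either complete the
+ * task (an earlier request already failed), park it on the nomem list (request
+ * pool exhausted), or allocate a fresh batch and submit the remaining blocks. */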
+static inline int
+accel_mlx5_task_continue(struct accel_mlx5_task *task)
+{
+	struct accel_mlx5_req *req;
+
+	TAILQ_FOREACH(req, &task->reqs, link) {
+		spdk_mempool_put(task->dev->dev_ctx->requests_pool, req);
+	}
+	TAILQ_INIT(&task->reqs);
+
+	if (spdk_unlikely(task->rc)) {
+		accel_mlx5_task_complete(task);
+		return 0;
+	}
+
+	if (spdk_unlikely(!accel_mlx5_task_alloc_reqs(task))) {
+		/* Pool is empty, queue this task */
+		TAILQ_INSERT_TAIL(&task->dev->nomem, task, link);
+		return -ENOMEM;
+	}
+	task->cur_req = TAILQ_FIRST(&task->reqs);
+
+	return accel_mlx5_task_process(task);
+}
+
+static inline int
+accel_mlx5_task_init(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_dev *dev)
+{
+	struct spdk_accel_task *task = &mlx5_task->base;
+	size_t src_nbytes = 0, dst_nbytes = 0;
+	uint32_t i;
+
+	switch (task->op_code) {
+	case ACCEL_OPC_ENCRYPT:
+		mlx5_task->encrypt_on_tx = true;
+		break;
+	case ACCEL_OPC_DECRYPT:
+		mlx5_task->encrypt_on_tx = false;
+		break;
+	default:
+		SPDK_ERRLOG("Unsupported accel opcode %d\n", task->op_code);
+		return -ENOTSUP;
+	}
+
+	for (i = 0; i < task->s.iovcnt; i++) {
+		src_nbytes += task->s.iovs[i].iov_len;
+	}
+
+	for (i = 0; i < task->d.iovcnt; i++) {
+		dst_nbytes += task->d.iovs[i].iov_len;
+	}
+
+	if (spdk_unlikely(src_nbytes != dst_nbytes)) {
+		return -EINVAL;
+	}
+	if (spdk_unlikely(src_nbytes % mlx5_task->base.block_size != 0)) {
+		return -EINVAL;
+	}
+
+	mlx5_task->dev = dev;
+	mlx5_task->rc = 0;
+	mlx5_task->num_completed_reqs = 0;
+	mlx5_task->num_submitted_reqs = 0;
+	mlx5_task->cur_req = NULL;
+	mlx5_task->num_reqs = src_nbytes / mlx5_task->base.block_size;
+	spdk_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt, 0);
+	if (task->d.iovcnt == 0 || (task->d.iovcnt == task->s.iovcnt &&
+				    accel_mlx5_compare_iovs(task->d.iovs, task->s.iovs, task->s.iovcnt))) {
+		mlx5_task->inplace = true;
+	} else {
+		mlx5_task->inplace = false;
+		spdk_iov_sgl_init(&mlx5_task->dst, task->d.iovs, task->d.iovcnt, 0);
+	}
+
+	TAILQ_INIT(&mlx5_task->reqs);
+	if (spdk_unlikely(!accel_mlx5_task_alloc_reqs(mlx5_task))) {
+		/* Pool is empty, queue this task */
+		SPDK_DEBUGLOG(accel_mlx5, "no reqs in pool, dev %s\n",
+			      mlx5_task->dev->dev_ctx->context->device->name);
+		return -ENOMEM;
+	}
+	mlx5_task->cur_req = TAILQ_FIRST(&mlx5_task->reqs);
+
+	SPDK_DEBUGLOG(accel_mlx5, "task %p, inplace %d, num_reqs %d\n", mlx5_task, mlx5_task->inplace,
+		      mlx5_task->num_reqs);
+
+	return 0;
+}
+
+static int
+accel_mlx5_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *task)
+{
+	struct accel_mlx5_io_channel *ch = spdk_io_channel_get_ctx(_ch);
+	struct accel_mlx5_task *mlx5_task = SPDK_CONTAINEROF(task, struct accel_mlx5_task, base);
+	struct accel_mlx5_dev *dev;
+	int rc;
+
+	if (!g_accel_mlx5.enabled || !task->crypto_key ||
+	    task->crypto_key->module_if != &g_accel_mlx5.module ||
+	    !task->crypto_key->priv) {
+		return -EINVAL;
+	}
+	dev = &ch->devs[ch->dev_idx];
+	ch->dev_idx++;
+	if (ch->dev_idx == ch->num_devs) {
+		ch->dev_idx = 0;
+	}
+
+	rc = accel_mlx5_task_init(mlx5_task, dev);
+	if (spdk_unlikely(rc)) {
+		if (rc == -ENOMEM) {
+			SPDK_DEBUGLOG(accel_mlx5, "no reqs to handle new task %p (required %u), put to queue\n", mlx5_task,
+				      mlx5_task->num_reqs);
+			TAILQ_INSERT_TAIL(&dev->nomem, mlx5_task, link);
+			return 0;
+		}
+		return rc;
+	}
+
+	return
accel_mlx5_task_process(mlx5_task); +} + +static inline int64_t +accel_mlx5_poll_cq(struct accel_mlx5_dev *dev) +{ + struct ibv_wc wc[ACCEL_MLX5_MAX_WC]; + struct accel_mlx5_task *task; + struct accel_mlx5_req *req; + struct accel_mlx5_wrid *wr; + int reaped, i, rc; + + reaped = ibv_poll_cq(dev->cq, ACCEL_MLX5_MAX_WC, wc); + if (spdk_unlikely(reaped < 0)) { + SPDK_ERRLOG("Error polling CQ! (%d): %s\n", errno, spdk_strerror(errno)); + return reaped; + } else if (reaped == 0) { + return 0; + } + + SPDK_DEBUGLOG(accel_mlx5, "Reaped %d cpls on dev %s\n", reaped, + dev->dev_ctx->context->device->name); + + for (i = 0; i < reaped; i++) { + wr = (struct accel_mlx5_wrid *)wc[i].wr_id; + + switch (wr->wrid) { + case ACCEL_MLX5_WRID_MKEY: + /* We only get this completion in error case */ + req = SPDK_CONTAINEROF(wr, struct accel_mlx5_req, mkey_wrid); + if (!wc[i].status) { + SPDK_ERRLOG("Got unexpected cpl for mkey configure, req %p, qp %p, state %d\n", + req, dev->qp->qp, accel_mlx5_get_qp_state(dev->qp->qp)); + } else { + SPDK_ERRLOG("MKEY: qp %p, state %d, req %p, task %p WC status %d\n", + dev->qp->qp, accel_mlx5_get_qp_state(dev->qp->qp), req, req->task, wc[i].status); + } + break; + case ACCEL_MLX5_WRID_WRITE: + req = SPDK_CONTAINEROF(wr, struct accel_mlx5_req, write_wrid); + task = req->task; + if (wc[i].status) { + assert(req->task); + SPDK_ERRLOG("WRITE: qp %p, state %d, req %p, task %p WC status %d\n", dev->qp->qp, + accel_mlx5_get_qp_state(dev->qp->qp), req, req->task, wc[i].status); + if (!task->rc) { + task->rc = -EIO; + } + } + + task->num_completed_reqs++; + assert(dev->reqs_submitted); + dev->reqs_submitted--; + SPDK_DEBUGLOG(accel_mlx5, "req %p, task %p, remaining %u\n", req, task, + task->num_reqs - task->num_completed_reqs); + if (task->num_completed_reqs == task->num_reqs) { + TAILQ_REMOVE(&dev->in_hw, task, link); + accel_mlx5_task_complete(task); + } else if (task->num_completed_reqs == task->num_submitted_reqs) { + assert(task->num_submitted_reqs < task->num_reqs); + TAILQ_REMOVE(&dev->in_hw, task, link); + rc = accel_mlx5_task_continue(task); + if (spdk_unlikely(rc)) { + if (rc != -ENOMEM) { + task->rc = rc; + accel_mlx5_task_complete(task); + } + } + } + break; + } + } + + return reaped; +} + +static inline void +accel_mlx5_resubmit_nomem_tasks(struct accel_mlx5_dev *dev) +{ + struct accel_mlx5_task *task, *tmp; + int rc; + + TAILQ_FOREACH_SAFE(task, &dev->nomem, link, tmp) { + TAILQ_REMOVE(&dev->nomem, task, link); + rc = accel_mlx5_task_continue(task); + if (rc) { + if (rc == -ENOMEM) { + break; + } else { + task->rc = rc; + accel_mlx5_task_complete(task); + } + } + } +} + +static int +accel_mlx5_poller(void *ctx) +{ + struct accel_mlx5_io_channel *ch = ctx; + struct accel_mlx5_dev *dev; + + int64_t completions = 0, rc; + uint32_t i; + + for (i = 0; i < ch->num_devs; i++) { + dev = &ch->devs[i]; + if (dev->reqs_submitted) { + rc = accel_mlx5_poll_cq(dev); + if (spdk_unlikely(rc < 0)) { + SPDK_ERRLOG("Error %"PRId64" on CQ, dev %s\n", rc, dev->dev_ctx->context->device->name); + } + completions += rc; + accel_mlx5_flush_wrs(dev); + } + if (!TAILQ_EMPTY(&dev->nomem)) { + accel_mlx5_resubmit_nomem_tasks(dev); + } + } + + return !!completions; +} + +static bool +accel_mlx5_supports_opcode(enum accel_opcode opc) +{ + assert(g_accel_mlx5.enabled); + + switch (opc) { + case ACCEL_OPC_ENCRYPT: + case ACCEL_OPC_DECRYPT: + return true; + default: + return false; + } +} + +static struct spdk_io_channel * +accel_mlx5_get_io_channel(void) +{ + assert(g_accel_mlx5.enabled); + return 
spdk_get_io_channel(&g_accel_mlx5); +} + +static void +accel_mlx5_qp_destroy(struct accel_mlx5_qp *qp) +{ + if (!qp) { + return; + } + + if (qp->qp) { + ibv_destroy_qp(qp->qp); + qp->qp = NULL; + } + + free(qp); +} + +static struct accel_mlx5_qp * +accel_mlx5_qp_create(struct ibv_cq *cq, struct accel_mlx5_io_channel *ch, struct ibv_pd *pd, + int qp_size) +{ + struct accel_mlx5_qp *qp; + struct ibv_qp_init_attr_ex dv_qp_attr = { + .qp_context = ch, + .cap = { + .max_send_wr = qp_size, + .max_recv_wr = 0, + .max_send_sge = ACCEL_MLX5_MAX_SGE, + .max_inline_data = sizeof(struct ibv_sge) * ACCEL_MLX5_MAX_SGE, + }, + .qp_type = IBV_QPT_RC, + .comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_SEND_OPS_FLAGS, + .pd = pd, + .send_ops_flags = IBV_QP_EX_WITH_RDMA_WRITE | IBV_QP_EX_WITH_SEND | IBV_QP_EX_WITH_RDMA_READ | IBV_QP_EX_WITH_BIND_MW, + .send_cq = cq, + .recv_cq = cq, + }; + /* Attrs required for MKEYs registration */ + struct mlx5dv_qp_init_attr mlx5_qp_attr = { + .comp_mask = MLX5DV_QP_INIT_ATTR_MASK_SEND_OPS_FLAGS, + .send_ops_flags = MLX5DV_QP_EX_WITH_MKEY_CONFIGURE + }; + int rc; + + if (!dv_qp_attr.send_cq || !dv_qp_attr.recv_cq) { + return NULL; + } + + qp = calloc(1, sizeof(*qp)); + if (!qp) { + return NULL; + } + + qp->qp = mlx5dv_create_qp(cq->context, &dv_qp_attr, &mlx5_qp_attr); + if (!qp->qp) { + SPDK_ERRLOG("Failed to create qpair, errno %s (%d)\n", spdk_strerror(errno), errno); + free(qp); + return NULL; + } + + rc = mlx5_qp_init_2_rts(qp->qp, qp->qp->qp_num); + if (rc) { + SPDK_ERRLOG("Failed to create loopback connection, qp_num %u\n", qp->qp->qp_num); + accel_mlx5_qp_destroy(qp); + return NULL; + } + + qp->qpex = ibv_qp_to_qp_ex(qp->qp); + if (!qp->qpex) { + SPDK_ERRLOG("Failed to get qpex\n"); + accel_mlx5_qp_destroy(qp); + return NULL; + } + + qp->mqpx = mlx5dv_qp_ex_from_ibv_qp_ex(qp->qpex); + if (!qp->mqpx) { + SPDK_ERRLOG("Failed to get mqpx\n"); + accel_mlx5_qp_destroy(qp); + return NULL; + } + + qp->num_reqs = qp_size; + qp->cq = cq; + + return qp; +} + +static void +accel_mlx5_destroy_cb(void *io_device, void *ctx_buf) +{ + struct accel_mlx5_io_channel *ch = ctx_buf; + struct accel_mlx5_dev *dev; + uint32_t i; + + spdk_poller_unregister(&ch->poller); + for (i = 0; i < ch->num_devs; i++) { + dev = &ch->devs[i]; + accel_mlx5_qp_destroy(dev->qp); + if (dev->cq) { + ibv_destroy_cq(dev->cq); + dev->cq = NULL; + } + spdk_rdma_free_mem_map(&dev->mmap); + } + free(ch->devs); +} + +static int +accel_mlx5_create_cb(void *io_device, void *ctx_buf) +{ + struct accel_mlx5_io_channel *ch = ctx_buf; + struct accel_mlx5_crypto_dev_ctx *dev_ctx; + struct accel_mlx5_dev *dev; + uint32_t i; + int rc; + + ch->devs = calloc(g_accel_mlx5.num_crypto_ctxs, sizeof(*ch->devs)); + if (!ch->devs) { + SPDK_ERRLOG("Memory allocation failed\n"); + return -ENOMEM; + } + + for (i = 0; i < g_accel_mlx5.num_crypto_ctxs; i++) { + dev_ctx = &g_accel_mlx5.crypto_ctxs[i]; + dev = &ch->devs[i]; + dev->dev_ctx = dev_ctx; + ch->num_devs++; + dev->cq = ibv_create_cq(dev_ctx->context, g_accel_mlx5.attr.qp_size, ch, NULL, 0); + if (!dev->cq) { + SPDK_ERRLOG("Failed to create CQ on dev %s\n", dev_ctx->context->device->name); + rc = -ENOMEM; + goto err_out; + } + + dev->qp = accel_mlx5_qp_create(dev->cq, ch, dev_ctx->pd, g_accel_mlx5.attr.qp_size); + if (!dev->qp) { + SPDK_ERRLOG("Failed to create QP on dev %s\n", dev_ctx->context->device->name); + rc = -ENOMEM; + goto err_out; + } + + TAILQ_INIT(&dev->nomem); + TAILQ_INIT(&dev->in_hw); + TAILQ_INIT(&dev->before_submit); + /* Each request consumes 2 WQE - MKEY 
and RDMA_WRITE. MKEY is unsignaled, so we count only RDMA_WRITE completions.
+		 * Divide user defined qp_size by two for simplicity */
+		dev->max_reqs = g_accel_mlx5.attr.qp_size / 2;
+		dev->mmap = spdk_rdma_create_mem_map(dev_ctx->pd, NULL, SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR);
+		if (!dev->mmap) {
+			SPDK_ERRLOG("Failed to create memory map\n");
+			rc = -ENOMEM;
+			goto err_out;
+		}
+	}
+
+	ch->poller = SPDK_POLLER_REGISTER(accel_mlx5_poller, ch, 0);
+
+	return 0;
+
+err_out:
+	accel_mlx5_destroy_cb(&g_accel_mlx5, ctx_buf);
+	return rc;
+}
+
+void
+accel_mlx5_get_default_attr(struct accel_mlx5_attr *attr)
+{
+	assert(attr);
+
+	attr->qp_size = ACCEL_MLX5_QP_SIZE;
+	attr->num_requests = ACCEL_MLX5_NUM_REQUESTS;
+}
+
+int
+accel_mlx5_enable(struct accel_mlx5_attr *attr)
+{
+	if (g_accel_mlx5.enabled) {
+		return -EEXIST;
+	}
+	if (attr) {
+		g_accel_mlx5.attr = *attr;
+	} else {
+		accel_mlx5_get_default_attr(&g_accel_mlx5.attr);
+	}
+
+	g_accel_mlx5.enabled = true;
+	spdk_accel_module_list_add(&g_accel_mlx5.module);
+
+	return 0;
+}
+
+static void
+accel_mlx5_release_crypto_req(struct spdk_mempool *mp, void *cb_arg, void *_req, unsigned obj_idx)
+{
+	struct accel_mlx5_req *req = _req;
+
+	if (req->mkey) {
+		mlx5dv_destroy_mkey(req->mkey);
+	}
+}
+
+static void
+accel_mlx5_release_reqs(struct accel_mlx5_crypto_dev_ctx *dev_ctx)
+{
+	if (!dev_ctx->requests_pool) {
+		return;
+	}
+
+	spdk_mempool_obj_iter(dev_ctx->requests_pool, accel_mlx5_release_crypto_req, NULL);
+}
+
+static void
+accel_mlx5_free_resources(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < g_accel_mlx5.num_crypto_ctxs; i++) {
+		accel_mlx5_release_reqs(&g_accel_mlx5.crypto_ctxs[i]);
+		spdk_rdma_put_pd(g_accel_mlx5.crypto_ctxs[i].pd);
+	}
+
+	free(g_accel_mlx5.crypto_ctxs);
+	g_accel_mlx5.crypto_ctxs = NULL;
+}
+
+static void
+accel_mlx5_deinit_cb(void *ctx)
+{
+	accel_mlx5_free_resources();
+	spdk_accel_module_finish();
+}
+
+static void
+accel_mlx5_deinit(void *ctx)
+{
+	if (g_accel_mlx5.crypto_ctxs) {
+		spdk_io_device_unregister(&g_accel_mlx5, accel_mlx5_deinit_cb);
+	} else {
+		spdk_accel_module_finish();
+	}
+}
+
+static void
+accel_mlx5_configure_crypto_req(struct spdk_mempool *mp, void *cb_arg, void *_req, unsigned obj_idx)
+{
+	struct accel_mlx5_req *req = _req;
+	struct accel_mlx5_req_init_ctx *ctx = cb_arg;
+	struct mlx5dv_mkey_init_attr mkey_attr = {
+		.pd = ctx->pd,
+		.max_entries = ACCEL_MLX5_MAX_SGE, /* This MKEY refers to N base MKEYs/buffers */
+		.create_flags = MLX5DV_MKEY_INIT_ATTR_FLAGS_INDIRECT | /* This MKEY refers to other MKEYs */
+				MLX5DV_MKEY_INIT_ATTR_FLAGS_CRYPTO
+	};
+
+	memset(req, 0, sizeof(*req));
+	if (ctx->rc) {
+		return;
+	}
+
+	req->mkey = mlx5dv_create_mkey(&mkey_attr);
+	if (!req->mkey) {
+		SPDK_ERRLOG("Failed to create mkey on dev %s, errno %d\n", ctx->pd->context->device->name, errno);
+		ctx->rc = errno;
+		return;
+	}
+
+	req->mkey_wrid.wrid = ACCEL_MLX5_WRID_MKEY;
+	req->write_wrid.wrid = ACCEL_MLX5_WRID_WRITE;
+}
+
+static int
+accel_mlx5_crypto_ctx_mempool_create(struct accel_mlx5_crypto_dev_ctx *crypto_dev_ctx,
+				     size_t num_entries)
+{
+	struct accel_mlx5_req_init_ctx init_ctx = {.pd = crypto_dev_ctx->pd };
+	char pool_name[32];
+	int rc;
+
+	/* Compiler may produce a warning like
+	 * warning: '%s' directive output may be truncated writing up to 63 bytes into a region of size 21
+	 * [-Wformat-truncation=]
+	 * That is expected, since the ibv device name is up to 64 bytes long while the DPDK mempool API
+	 * limits the pool name to 32 bytes.
+ * To suppress this warning check the value returned by snprintf */ + rc = snprintf(pool_name, 32, "accel_mlx5_%s", crypto_dev_ctx->context->device->name); + if (rc < 0) { + assert(0); + return -EINVAL; + } + crypto_dev_ctx->requests_pool = spdk_mempool_create_ctor(pool_name, num_entries, + sizeof(struct accel_mlx5_req), + SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, SPDK_ENV_SOCKET_ID_ANY, + accel_mlx5_configure_crypto_req, &init_ctx); + if (!crypto_dev_ctx->requests_pool || init_ctx.rc) { + SPDK_ERRLOG("Failed to create memory pool\n"); + return init_ctx.rc ? : -ENOMEM; + } + + return 0; +} + +static int +accel_mlx5_init(void) +{ + struct accel_mlx5_crypto_dev_ctx *crypto_dev_ctx; + struct ibv_context **rdma_devs, *dev; + struct ibv_pd *pd; + int num_devs = 0, rc = 0, i; + + if (!g_accel_mlx5.enabled) { + return -EINVAL; + } + + rdma_devs = spdk_mlx5_crypto_devs_get(&num_devs); + if (!rdma_devs || !num_devs) { + SPDK_NOTICELOG("No crypto devs found\n"); + return -ENOTSUP; + } + + g_accel_mlx5.crypto_ctxs = calloc(num_devs, sizeof(*g_accel_mlx5.crypto_ctxs)); + if (!g_accel_mlx5.crypto_ctxs) { + SPDK_ERRLOG("Memory allocation failed\n"); + rc = -ENOMEM; + goto cleanup; + } + + for (i = 0; i < num_devs; i++) { + crypto_dev_ctx = &g_accel_mlx5.crypto_ctxs[i]; + dev = rdma_devs[i]; + pd = spdk_rdma_get_pd(dev); + if (!pd) { + SPDK_ERRLOG("Failed to get PD for context %p, dev %s\n", dev, dev->device->name); + rc = -EINVAL; + goto cleanup; + } + crypto_dev_ctx->context = dev; + crypto_dev_ctx->pd = pd; + g_accel_mlx5.num_crypto_ctxs++; + rc = accel_mlx5_crypto_ctx_mempool_create(crypto_dev_ctx, g_accel_mlx5.attr.num_requests); + if (rc) { + goto cleanup; + } + } + + SPDK_NOTICELOG("Accel framework mlx5 initialized, found %d devices.\n", num_devs); + spdk_io_device_register(&g_accel_mlx5, accel_mlx5_create_cb, accel_mlx5_destroy_cb, + sizeof(struct accel_mlx5_io_channel), "accel_mlx5"); + + spdk_mlx5_crypto_devs_release(rdma_devs); + + return rc; + +cleanup: + spdk_mlx5_crypto_devs_release(rdma_devs); + accel_mlx5_free_resources(); + + return rc; +} + +static void +accel_mlx5_write_config_json(struct spdk_json_write_ctx *w) +{ + if (g_accel_mlx5.enabled) { + spdk_json_write_object_begin(w); + spdk_json_write_named_string(w, "method", "mlx5_scan_accel_module"); + spdk_json_write_named_object_begin(w, "params"); + spdk_json_write_named_uint16(w, "qp_size", g_accel_mlx5.attr.qp_size); + spdk_json_write_named_uint32(w, "num_requests", g_accel_mlx5.attr.num_requests); + spdk_json_write_object_end(w); + spdk_json_write_object_end(w); + } +} + +static size_t +accel_mlx5_get_ctx_size(void) +{ + return sizeof(struct accel_mlx5_task); +} + +static int +accel_mlx5_crypto_key_init(struct spdk_accel_crypto_key *key) +{ + struct spdk_mlx5_crypto_dek_create_attr attr = {}; + struct spdk_mlx5_crypto_keytag *keytag; + int rc; + + if (!key || !key->key || !key->key2 || !key->key_size || !key->key2_size) { + return -EINVAL; + } + + attr.dek = calloc(1, key->key_size + key->key2_size); + if (!attr.dek) { + return -ENOMEM; + } + + memcpy(attr.dek, key->key, key->key_size); + memcpy(attr.dek + key->key_size, key->key2, key->key2_size); + attr.dek_len = key->key_size + key->key2_size; + + rc = spdk_mlx5_crypto_keytag_create(&attr, &keytag); + spdk_memset_s(attr.dek, attr.dek_len, 0, attr.dek_len); + free(attr.dek); + if (rc) { + SPDK_ERRLOG("Failed to create a keytag, rc %d\n", rc); + return rc; + } + + key->priv = keytag; + + return 0; +} + +static void +accel_mlx5_crypto_key_deinit(struct spdk_accel_crypto_key *key) +{ + if 
(!key || key->module_if != &g_accel_mlx5.module || !key->priv) {
+		return;
+	}
+
+	spdk_mlx5_crypto_keytag_destroy(key->priv);
+}
+
+static struct accel_mlx5_module g_accel_mlx5 = {
+	.module = {
+		.module_init = accel_mlx5_init,
+		.module_fini = accel_mlx5_deinit,
+		.write_config_json = accel_mlx5_write_config_json,
+		.get_ctx_size = accel_mlx5_get_ctx_size,
+		.name = "mlx5",
+		.supports_opcode = accel_mlx5_supports_opcode,
+		.get_io_channel = accel_mlx5_get_io_channel,
+		.submit_tasks = accel_mlx5_submit_tasks,
+		.crypto_key_init = accel_mlx5_crypto_key_init,
+		.crypto_key_deinit = accel_mlx5_crypto_key_deinit,
+	}
+};
+
+SPDK_LOG_REGISTER_COMPONENT(accel_mlx5)
diff --git a/module/accel/mlx5/accel_mlx5.h b/module/accel/mlx5/accel_mlx5.h
new file mode 100644
index 000000000..028a0eb07
--- /dev/null
+++ b/module/accel/mlx5/accel_mlx5.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#ifndef ACCEL_MLX5_H
+#define ACCEL_MLX5_H
+
+#include "spdk/stdinc.h"
+
+struct accel_mlx5_attr {
+	/* The number of entries in qp submission/receive queue */
+	uint16_t qp_size;
+	/* The number of requests in the global pool */
+	uint32_t num_requests;
+};
+
+void accel_mlx5_get_default_attr(struct accel_mlx5_attr *attr);
+int accel_mlx5_enable(struct accel_mlx5_attr *attr);
+
+#endif /* ACCEL_MLX5_H */
diff --git a/module/accel/mlx5/accel_mlx5_rpc.c b/module/accel/mlx5/accel_mlx5_rpc.c
new file mode 100644
index 000000000..156cd7098
--- /dev/null
+++ b/module/accel/mlx5/accel_mlx5_rpc.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ */
+
+#include "spdk/rpc.h"
+#include "spdk/util.h"
+#include "spdk/log.h"
+
+#include "accel_mlx5.h"
+
+static const struct spdk_json_object_decoder rpc_mlx5_module_decoder[] = {
+	{"qp_size", offsetof(struct accel_mlx5_attr, qp_size), spdk_json_decode_uint16, true},
+	{"num_requests", offsetof(struct accel_mlx5_attr, num_requests), spdk_json_decode_uint32, true},
+};
+
+static void
+rpc_mlx5_scan_accel_module(struct spdk_jsonrpc_request *request,
+			   const struct spdk_json_val *params)
+{
+	struct accel_mlx5_attr attr;
+	int rc;
+
+	accel_mlx5_get_default_attr(&attr);
+
+	if (params != NULL) {
+		if (spdk_json_decode_object(params, rpc_mlx5_module_decoder,
+					    SPDK_COUNTOF(rpc_mlx5_module_decoder),
+					    &attr)) {
+			SPDK_ERRLOG("spdk_json_decode_object() failed\n");
+			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_PARSE_ERROR,
+							 "spdk_json_decode_object failed");
+			return;
+		}
+	}
+
+	rc = accel_mlx5_enable(&attr);
+	if (rc) {
+		spdk_jsonrpc_send_error_response_fmt(request, rc, "mlx5 scan failed with %d\n", rc);
+	} else {
+		spdk_jsonrpc_send_bool_response(request, true);
+	}
+}
+SPDK_RPC_REGISTER("mlx5_scan_accel_module", rpc_mlx5_scan_accel_module, SPDK_RPC_STARTUP)
diff --git a/python/spdk/rpc/__init__.py b/python/spdk/rpc/__init__.py
index 471ffe292..836f76176 100644
--- a/python/spdk/rpc/__init__.py
+++ b/python/spdk/rpc/__init__.py
@@ -33,6 +33,7 @@ from . import sock
 from . import vfio_user
 from . import iobuf
 from . import dpdk_cryptodev
+from . import mlx5
 from . import client as rpc_client
diff --git a/python/spdk/rpc/mlx5.py b/python/spdk/rpc/mlx5.py
new file mode 100644
index 000000000..2c1424db1
--- /dev/null
+++ b/python/spdk/rpc/mlx5.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+ +from spdk.rpc.helpers import deprecated_alias + + +def mlx5_scan_accel_module(client, qp_size=None, num_requests=None): + """Enable mlx5 accel module. Scans all mlx5 devices which can perform needed operations + + Args: + qp_size: Qpair size. (optional) + num_requests: size of a global requests pool per mlx5 device (optional) + """ + params = {} + + if qp_size is not None: + params['qp_size'] = qp_size + if num_requests is not None: + params['num_requests'] = num_requests + return client.call('mlx5_scan_accel_module', params) diff --git a/scripts/rpc.py b/scripts/rpc.py index 6a3a36d47..fa6483f64 100755 --- a/scripts/rpc.py +++ b/scripts/rpc.py @@ -2908,6 +2908,17 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse p = subparsers.add_parser('dpdk_cryptodev_get_driver', help='Get the DPDK cryptodev driver') p.set_defaults(func=dpdk_cryptodev_get_driver) + # mlx5 + def mlx5_scan_accel_module(args): + rpc.mlx5.mlx5_scan_accel_module(args.client, + qp_size=args.qp_size, + num_requests=args.num_requests) + + p = subparsers.add_parser('mlx5_scan_accel_module', help='Enable mlx5 accel module.') + p.add_argument('-q', '--qp-size', type=int, help='QP size') + p.add_argument('-r', '--num-requests', type=int, help='Size of the shared requests pool') + p.set_defaults(func=mlx5_scan_accel_module) + # opal def bdev_nvme_opal_init(args): rpc.nvme.bdev_nvme_opal_init(args.client, diff --git a/test/common/skipped_build_files.txt b/test/common/skipped_build_files.txt index 08842fad5..609a48716 100644 --- a/test/common/skipped_build_files.txt +++ b/test/common/skipped_build_files.txt @@ -56,3 +56,8 @@ module/bdev/daos/bdev_daos_rpc # Not configured to test xNVMe bdev module/bdev/xnvme/bdev_xnvme module/bdev/xnvme/bdev_xnvme_rpc + +# Not configured to test mlx5 accel module & crypto lib +module/accel/mlx5/accel_mlx5 +module/accel/mlx5/accel_mlx5_rpc +lib/mlx5/mlx5_crypto
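
Finally, a consumer-side sketch (not part of the patch) of how the module is exercised once enabled: encrypt/decrypt tasks arrive through the generic accel API mentioned in the CHANGELOG above. This assumes the `spdk_accel_submit_encrypt()` signature of this SPDK revision (per-call `iv` and `block_size` plus a `flags` argument) and a crypto key created beforehand, e.g. via the `accel_crypto_key_create` RPC; buffers and sizes are illustrative.

~~~c
#include "spdk/accel.h"
#include "spdk/log.h"

static void
encrypt_done(void *cb_arg, int status)
{
	/* status is 0 on success, negated errno otherwise */
	SPDK_NOTICELOG("encrypt completed with status %d\n", status);
}

/* Sketch: encrypt one 512-byte block at LBA 0 from src into dst.
 * "key" must be an AES_XTS key created beforehand; putting the io
 * channel back is omitted for brevity. */
static int
submit_encrypt_example(struct spdk_accel_crypto_key *key,
		       struct iovec *src, struct iovec *dst)
{
	struct spdk_io_channel *ch = spdk_accel_get_io_channel();

	if (!ch) {
		return -ENOMEM;
	}

	/* iv doubles as the XTS tweak; the module advances it per block */
	return spdk_accel_submit_encrypt(ch, key, dst, 1, src, 1,
					 0 /* iv */, 512 /* block_size */, 0 /* flags */,
					 encrypt_done, NULL);
}
~~~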