diff --git a/CHANGELOG.md b/CHANGELOG.md
index 85882b25e..8029678c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,9 @@ New library isa-l-crypto has been added, it is used by accel library in crypto o
 
 New functions `spdk_accel_submit_encrypt` and `spdk_accel_submit_decrypt` were added.
 
+New accel module `dpdk_cryptodev` has been added. It uses DPDK crypto PMDs and supports encrypt and
+decrypt operations. New RPC `dpdk_cryptodev_scan_accel_module` has been added to enable this accel module.
+
 ### bdev
 
 Both of interleaved and separated metadata are now supported by the malloc bdev module.
diff --git a/doc/accel_fw.md b/doc/accel_fw.md
index 9ab9d9d85..e2be9e845 100644
--- a/doc/accel_fw.md
+++ b/doc/accel_fw.md
@@ -86,7 +86,7 @@ The DSA hardware supports a limited queue depth and channels. This means that
 only a limited number of `spdk_thread`s will be able to acquire a channel.
 Design software to deal with the inability to get a channel.
 
-### How to use kernel idxd driver {#accel_idxd_kernel}
+#### How to use kernel idxd driver {#accel_idxd_kernel}
 
 There are several dependencies to leverage the Linux idxd driver for driving DSA devices.
 
@@ -139,6 +139,22 @@ enabled via startup RPC as discussed earlier, the software module will use ISA-L
 if available for functions such as CRC32C. Otherwise, standard glibc calls are
 used to back the framework API.
 
+### dpdk_cryptodev {#accel_dpdk_cryptodev}
+
+The dpdk_cryptodev module uses the DPDK CryptoDev API to implement crypto operations.
+The following ciphers and PMDs are supported:
+
+- AES-NI Multi Buffer Crypto Poll Mode Driver: RTE_CRYPTO_CIPHER_AES128_CBC
+- Intel(R) QuickAssist (QAT) Crypto Poll Mode Driver: RTE_CRYPTO_CIPHER_AES128_CBC,
+  RTE_CRYPTO_CIPHER_AES128_XTS
+  (Note: QAT is functional, however it is marked as experimental until the hardware has
+  been fully integrated with the SPDK CI system.)
+- MLX5 Crypto Poll Mode Driver: RTE_CRYPTO_CIPHER_AES256_XTS, RTE_CRYPTO_CIPHER_AES512_XTS
+
+To enable this module, use [`dpdk_cryptodev_scan_accel_module`](https://spdk.io/doc/jsonrpc.html).
+This RPC is only available in the STARTUP state, so the SPDK application must be started with the
+`--wait-for-rpc` CLI parameter.
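+
+For example, assuming the standard `scripts/rpc.py` helper and the default build output path
+(both illustrative; exact paths may differ per installation), enabling the module at startup
+looks roughly like this:
+
+~~~bash
+# Start the target in the STARTUP state so accel modules can still be configured.
+./build/bin/spdk_tgt --wait-for-rpc &
+
+# Enable the dpdk_cryptodev accel module, then continue framework initialization.
+./scripts/rpc.py dpdk_cryptodev_scan_accel_module
+./scripts/rpc.py framework_start_init
+~~~
+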
To select a specific PMD, use [`dpdk_cryptodev_set_driver`](https://spdk.io/doc/jsonrpc.html) + ### Module to Operation Code Assignment {#accel_assignments} When multiple modules are initialized, the accel framework will assign op codes to diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md index a52d08acd..52e8096a5 100644 --- a/doc/jsonrpc.md +++ b/doc/jsonrpc.md @@ -446,6 +446,9 @@ Example response: "accel_crypto_keys_get", "ioat_scan_accel_module", "dsa_scan_accel_module", + "dpdk_cryptodev_scan_accel_module", + "dpdk_cryptodev_set_driver", + "dpdk_cryptodev_get_driver", "bdev_virtio_attach_controller", "bdev_virtio_scsi_get_devices", "bdev_virtio_detach_controller", @@ -1961,6 +1964,101 @@ Example response: } ~~~ +### dpdk_cryptodev_scan_accel_module {#rpc_dpdk_cryptodev_scan_accel_module} + +Enable dpdk_cryptodev accel offload + +#### Parameters + +None + +#### Example + +Example request: + +~~~json +{ + "jsonrpc": "2.0", + "method": "dpdk_cryptodev_scan_accel_module", + "id": 1 +} +~~~ + +Example response: + +~~~json +{ + "jsonrpc": "2.0", + "id": 1, + "result": true +} +~~~ + +### dpdk_cryptodev_set_driver {#rpc_dpdk_cryptodev_set_driver} + +Set the DPDK cryptodev driver + +#### Parameters + +Name | Optional | Type | Description +----------------------- |----------|--------| ----------- +driver_name | Required | string | The driver, can be one of crypto_aesni_mb, crypto_qat or mlx5_pci + +#### Example + +Example request: + +~~~json +{ + "jsonrpc": "2.0", + "method": "dpdk_cryptodev_set_driver", + "id": 1, + "params": { + "driver_name": "crypto_aesni_mb" + } +} +~~~ + +Example response: + +~~~json +{ + "jsonrpc": "2.0", + "id": 1, + "result": true +} +~~~ + +### dpdk_cryptodev_get_driver {#rpc_dpdk_cryptodev_get_driver} + +Get the DPDK cryptodev driver + +#### Parameters + +None + +#### Example + +Example request: + +~~~json +{ + "jsonrpc": "2.0", + "method": "dpdk_cryptodev_get_driver", + "id": 1 +} +~~~ + +Example response: + +~~~json +{ + "jsonrpc": "2.0", + "id": 1, + "result": "crypto_aesni_mb" +} +~~~ + ## Block Device Abstraction Layer {#jsonrpc_components_bdev} ### bdev_set_options {#rpc_bdev_set_options} diff --git a/mk/spdk.lib_deps.mk b/mk/spdk.lib_deps.mk index 48cbea439..4ad25f174 100644 --- a/mk/spdk.lib_deps.mk +++ b/mk/spdk.lib_deps.mk @@ -1,5 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (C) 2015 Intel Corporation. +# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES # All rights reserved. # @@ -96,6 +97,7 @@ endif DEPDIRS-accel_ioat := log ioat thread jsonrpc rpc accel DEPDIRS-accel_dsa := log idxd thread $(JSON_LIBS) accel trace DEPDIRS-accel_iaa := log idxd thread $(JSON_LIBS) accel trace +DEPDIRS-accel_dpdk_cryptodev := log thread $(JSON_LIBS) accel # module/env_dpdk DEPDIRS-env_dpdk_rpc := log $(JSON_LIBS) diff --git a/mk/spdk.modules.mk b/mk/spdk.modules.mk index fef20914e..0ecf4fb9c 100644 --- a/mk/spdk.modules.mk +++ b/mk/spdk.modules.mk @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (C) 2016 Intel Corporation. +# Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. # All rights reserved. -# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# BLOCKDEV_MODULES_LIST = bdev_malloc bdev_null bdev_nvme bdev_passthru bdev_lvol @@ -101,6 +101,9 @@ ACCEL_MODULES_LIST = accel_ioat ioat ifeq ($(CONFIG_IDXD),y) ACCEL_MODULES_LIST += accel_dsa accel_iaa idxd endif +ifeq ($(CONFIG_CRYPTO),y) +ACCEL_MODULES_LIST += accel_dpdk_cryptodev +endif SCHEDULER_MODULES_LIST = scheduler_dynamic ifeq (y,$(DPDK_POWER)) diff --git a/module/accel/Makefile b/module/accel/Makefile index 56ea19d4a..1360c2204 100644 --- a/module/accel/Makefile +++ b/module/accel/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (C) 2015 Intel Corporation. +# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES # All rights reserved. # @@ -10,6 +11,7 @@ DIRS-y = ioat DIRS-$(CONFIG_IDXD) += dsa DIRS-$(CONFIG_IDXD) += iaa +DIRS-$(CONFIG_CRYPTO) += dpdk_cryptodev .PHONY: all clean $(DIRS-y) diff --git a/module/accel/dpdk_cryptodev/Makefile b/module/accel/dpdk_cryptodev/Makefile new file mode 100644 index 000000000..666b73ad2 --- /dev/null +++ b/module/accel/dpdk_cryptodev/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) Intel Corporation. +# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES +# All rights reserved. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk + +SO_VER := 1 +SO_MINOR := 0 + +CFLAGS += $(ENV_CFLAGS) + +LIBNAME = accel_dpdk_cryptodev +C_SRCS = accel_dpdk_cryptodev.c accel_dpdk_cryptodev_rpc.c + +SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map + +include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk diff --git a/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c index fa52169aa..9a7be552e 100644 --- a/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c +++ b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c @@ -1,231 +1,930 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2018 Intel Corporation. - * All rights reserved. * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. * All rights reserved. */ -#include "vbdev_crypto.h" +#include "accel_dpdk_cryptodev.h" +#include "spdk/accel.h" +#include "spdk_internal/accel_module.h" #include "spdk/env.h" #include "spdk/likely.h" -#include "spdk/endian.h" #include "spdk/thread.h" -#include "spdk/bdev_module.h" +#include "spdk/util.h" #include "spdk/log.h" -#include "spdk/hexlify.h" +#include "spdk/json.h" +#include "spdk_internal/sgl.h" -#include #include #include #include #include -#include - -/* Used to store IO context in mbuf */ -static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = { - .name = "context_bdev_io", - .size = sizeof(uint64_t), - .align = __alignof__(uint64_t), - .flags = 0, -}; -static int g_mbuf_offset; - -/* To add support for new device types, follow the examples of the following... - * Note that the string names are defined by the DPDK PMD in question so be - * sure to use the exact names. - */ -#define MAX_NUM_DRV_TYPES 3 /* The VF spread is the number of queue pairs between virtual functions, we use this to * load balance the QAT device. */ -#define QAT_VF_SPREAD 32 -static uint8_t g_qat_total_qp = 0; -static uint8_t g_next_qat_index; +#define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD 32 -const char *g_driver_names[MAX_NUM_DRV_TYPES] = { AESNI_MB, QAT, MLX5 }; - -/* Global list of available crypto devices. 
*/ -struct vbdev_dev { - struct rte_cryptodev_info cdev_info; /* includes device friendly name */ - uint8_t cdev_id; /* identifier for the device */ - TAILQ_ENTRY(vbdev_dev) link; -}; -static TAILQ_HEAD(, vbdev_dev) g_vbdev_devs = TAILQ_HEAD_INITIALIZER(g_vbdev_devs); - -/* Global list and lock for unique device/queue pair combos. We keep 1 list per supported PMD - * so that we can optimize per PMD where it make sense. For example, with QAT there an optimal - * pattern for assigning queue pairs where with AESNI there is not. - */ -struct device_qp { - struct vbdev_dev *device; /* ptr to crypto device */ - uint8_t qp; /* queue pair for this node */ - bool in_use; /* whether this node is in use or not */ - uint8_t index; /* used by QAT to load balance placement of qpairs */ - TAILQ_ENTRY(device_qp) link; -}; -static TAILQ_HEAD(, device_qp) g_device_qp_qat = TAILQ_HEAD_INITIALIZER(g_device_qp_qat); -static TAILQ_HEAD(, device_qp) g_device_qp_aesni_mb = TAILQ_HEAD_INITIALIZER(g_device_qp_aesni_mb); -static TAILQ_HEAD(, device_qp) g_device_qp_mlx5 = TAILQ_HEAD_INITIALIZER(g_device_qp_mlx5); -static pthread_mutex_t g_device_qp_lock = PTHREAD_MUTEX_INITIALIZER; - - -/* In order to limit the number of resources we need to do one crypto - * operation per LBA (we use LBA as IV), we tell the bdev layer that - * our max IO size is something reasonable. Units here are in bytes. - */ -#define CRYPTO_MAX_IO (64 * 1024) +/* Max length in byte of a crypto operation */ +#define ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO (64 * 1024) /* This controls how many ops will be dequeued from the crypto driver in one run * of the poller. It is mainly a performance knob as it effectively determines how * much work the poller has to do. However even that can vary between crypto drivers - * as the AESNI_MB driver for example does all the crypto work on dequeue whereas the + * as the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver for example does all the crypto work on dequeue whereas the * QAT driver just dequeues what has been completed already. */ -#define MAX_DEQUEUE_BURST_SIZE 64 +#define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE 64 /* When enqueueing, we need to supply the crypto driver with an array of pointers to - * operation structs. As each of these can be max 512B, we can adjust the CRYPTO_MAX_IO + * operation structs. As each of these can be max 512B, we can adjust the ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO * value in conjunction with the other defines to make sure we're not using crazy amounts * of memory. All of these numbers can and probably should be adjusted based on the * workload. By default we'll use the worst case (smallest) block size for the - * minimum number of array entries. As an example, a CRYPTO_MAX_IO size of 64K with 512B + * minimum number of array entries. As an example, a ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO size of 64K with 512B * blocks would give us an enqueue array size of 128. */ -#define MAX_ENQUEUE_ARRAY_SIZE (CRYPTO_MAX_IO / 512) +#define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / 512) /* The number of MBUFS we need must be a power of two and to support other small IOs * in addition to the limits mentioned above, we go to the next power of two. It is * big number because it is one mempool for source and destination mbufs. It may * need to be bigger to support multiple crypto drivers at once. 
*/ -#define NUM_MBUFS 32768 -#define POOL_CACHE_SIZE 256 -#define MAX_CRYPTO_VOLUMES 128 -#define NUM_SESSIONS (2 * MAX_CRYPTO_VOLUMES) -#define SESS_MEMPOOL_CACHE_SIZE 0 -uint8_t g_number_of_claimed_volumes = 0; +#define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS 32768 +#define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE 256 +#define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES 128 +#define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS (2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES) +#define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE 0 /* This is the max number of IOs we can supply to any crypto device QP at one time. * It can vary between drivers. */ -#define CRYPTO_QP_DESCRIPTORS 2048 +#define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS 2048 /* At this moment DPDK descriptors allocation for mlx5 has some issues. We use 512 - * as an compromise value between performance and the time spent for initialization. */ -#define CRYPTO_QP_DESCRIPTORS_MLX5 512 + * as a compromise value between performance and the time spent for initialization. */ +#define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5 512 -#define AESNI_MB_NUM_QP 64 +#define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP 64 -/* Common for supported devices. */ -#define DEFAULT_NUM_XFORMS 2 -#define IV_OFFSET (sizeof(struct rte_crypto_op) + \ +/* Common for suported devices. */ +#define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS 2 +#define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \ sizeof(struct rte_crypto_sym_op) + \ - (DEFAULT_NUM_XFORMS * \ + (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \ sizeof(struct rte_crypto_sym_xform))) -#define IV_LENGTH 16 -#define QUEUED_OP_OFFSET (IV_OFFSET + IV_LENGTH) +#define ACCEL_DPDK_CRYPTODEV_IV_LENGTH 16 +#define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET (ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH) -static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); -static void _complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); -static void _complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg); -static void vbdev_crypto_examine(struct spdk_bdev *bdev); -static int vbdev_crypto_claim(const char *bdev_name); -static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io); +/* Driver names */ +#define ACCEL_DPDK_CRYPTODEV_AESNI_MB "crypto_aesni_mb" +#define ACCEL_DPDK_CRYPTODEV_QAT "crypto_qat" +#define ACCEL_DPDK_CRYPTODEV_QAT_ASYM "crypto_qat_asym" +#define ACCEL_DPDK_CRYPTODEV_MLX5 "mlx5_pci" -struct bdev_names { - struct vbdev_crypto_opts *opts; - TAILQ_ENTRY(bdev_names) link; +/* Supported ciphers */ +#define ACCEL_DPDK_CRYPTODEV_AES_CBC "AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */ +#define ACCEL_DPDK_CRYPTODEV_AES_XTS "AES_XTS" /* QAT and MLX5 */ + +/* Specific to AES_CBC. */ +#define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH 16 +#define ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH 16 /* AES-XTS-128 block key size. */ +#define ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH 32 /* AES-XTS-256 block key size. */ +#define ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH 64 /* AES-XTS-512 block key size. */ + +#define ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH 16 /* XTS part key size is always 128 bit. 
*/ + +/* Used to store IO context in mbuf */ +static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = { + .name = "context_accel_dpdk_cryptodev", + .size = sizeof(uint64_t), + .align = __alignof__(uint64_t), + .flags = 0, }; -/* List of crypto_bdev names and their base bdevs via configuration file. */ -static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names); +struct accel_dpdk_cryptodev_device; -struct vbdev_crypto { - struct spdk_bdev *base_bdev; /* the thing we're attaching to */ - struct spdk_bdev_desc *base_desc; /* its descriptor we get from open */ - struct spdk_bdev crypto_bdev; /* the crypto virtual bdev */ - struct vbdev_crypto_opts *opts; /* crypto options such as key, cipher */ - uint32_t qp_desc_nr; /* number of qp descriptors */ - void *session_encrypt; /* encryption session for this bdev */ - void *session_decrypt; /* decryption session for this bdev */ - struct rte_crypto_sym_xform cipher_xform; /* crypto control struct for this bdev */ - TAILQ_ENTRY(vbdev_crypto) link; - struct spdk_thread *thread; /* thread where base device is opened */ +enum accel_dpdk_cryptodev_driver_type { + ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0, + ACCEL_DPDK_CRYPTODEV_DRIVER_QAT, + ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI, + ACCEL_DPDK_CRYPTODEV_DRIVER_LAST }; -/* List of virtual bdevs and associated info for each. We keep the device friendly name here even - * though its also in the device struct because we use it early on. +enum accel_dpdk_crypto_dev_cipher_type { + ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC, + ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS +}; + +struct accel_dpdk_cryptodev_qp { + struct accel_dpdk_cryptodev_device *device; /* ptr to crypto device */ + uint32_t num_enqueued_ops; /* Used to decide whether to poll the qp or not */ + uint8_t qp; /* queue identifier */ + bool in_use; /* whether this node is in use or not */ + uint8_t index; /* used by QAT to load balance placement of qpairs */ + TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link; +}; + +struct accel_dpdk_cryptodev_device { + enum accel_dpdk_cryptodev_driver_type type; + struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */ + uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */ + uint8_t cdev_id; /* identifier for the device */ + TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs; + TAILQ_ENTRY(accel_dpdk_cryptodev_device) link; +}; + +struct accel_dpdk_cryptodev_key_handle { + struct accel_dpdk_cryptodev_device *device; + TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link; + struct rte_cryptodev_sym_session *session_encrypt; /* encryption session for this key */ + struct rte_cryptodev_sym_session *session_decrypt; /* decryption session for this key */ + struct rte_crypto_sym_xform cipher_xform; /* crypto control struct for this key */ +}; + +struct accel_dpdk_cryptodev_key_priv { + enum accel_dpdk_cryptodev_driver_type driver; + enum accel_dpdk_crypto_dev_cipher_type cipher; + char *xts_key; + TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys; +}; + +/* For queueing up crypto operations that we can't submit for some reason */ +struct accel_dpdk_cryptodev_queued_op { + struct accel_dpdk_cryptodev_qp *qp; + struct rte_crypto_op *crypto_op; + struct accel_dpdk_cryptodev_task *task; + TAILQ_ENTRY(accel_dpdk_cryptodev_queued_op) link; +}; +#define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH (sizeof(struct accel_dpdk_cryptodev_queued_op)) + +/* The crypto channel struct. It is allocated and freed on my behalf by the io channel code. 
+ * We store things in here that are needed on per thread basis like the base_channel for this thread, + * and the poller for this thread. */ -static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto); +struct accel_dpdk_cryptodev_io_channel { + /* completion poller */ + struct spdk_poller *poller; + /* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */ + struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST]; + /* queued for re-submission to CryptoDev */ + TAILQ_HEAD(, accel_dpdk_cryptodev_queued_op) queued_cry_ops; +}; + +struct accel_dpdk_cryptodev_task { + struct spdk_accel_task base; + uint32_t cryop_cnt_remaining; + bool is_failed; + TAILQ_ENTRY(accel_dpdk_cryptodev_task) link; +}; /* Shared mempools between all devices on this system */ static struct rte_mempool *g_session_mp = NULL; static struct rte_mempool *g_session_mp_priv = NULL; static struct rte_mempool *g_mbuf_mp = NULL; /* mbuf mempool */ +static int g_mbuf_offset; static struct rte_mempool *g_crypto_op_mp = NULL; /* crypto operations, must be rte* mempool */ static struct rte_mbuf_ext_shared_info g_shinfo = {}; /* used by DPDK mbuf macro */ -/* For queueing up crypto operations that we can't submit for some reason */ -struct vbdev_crypto_op { - uint8_t cdev_id; - uint8_t qp; - struct rte_crypto_op *crypto_op; - struct spdk_bdev_io *bdev_io; - TAILQ_ENTRY(vbdev_crypto_op) link; -}; -#define QUEUED_OP_LENGTH (sizeof(struct vbdev_crypto_op)) +static uint8_t g_qat_total_qp = 0; +static uint8_t g_next_qat_index; -/* The crypto vbdev channel struct. It is allocated and freed on my behalf by the io channel code. - * We store things in here that are needed on per thread basis like the base_channel for this thread, - * and the poller for this thread. - */ -struct crypto_io_channel { - struct spdk_io_channel *base_ch; /* IO channel of base device */ - struct spdk_poller *poller; /* completion poller */ - struct device_qp *device_qp; /* unique device/qp combination for this channel */ - TAILQ_HEAD(, spdk_bdev_io) pending_cry_ios; /* outstanding operations to the crypto device */ - struct spdk_io_channel_iter *iter; /* used with for_each_channel in reset */ - TAILQ_HEAD(, vbdev_crypto_op) queued_cry_ops; /* queued for re-submission to CryptoDev */ +static const char *g_driver_names[] = { + [ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = ACCEL_DPDK_CRYPTODEV_AESNI_MB, + [ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = ACCEL_DPDK_CRYPTODEV_QAT, + [ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = ACCEL_DPDK_CRYPTODEV_MLX5 +}; +static const char *g_cipher_names[] = { + [ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC] = ACCEL_DPDK_CRYPTODEV_AES_CBC, + [ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS] = ACCEL_DPDK_CRYPTODEV_AES_XTS, }; -/* This is the crypto per IO context that the bdev layer allocates for us opaquely and attaches to - * each IO for us. 
- */ -struct crypto_bdev_io { - int cryop_cnt_remaining; /* counter used when completing crypto ops */ - struct crypto_io_channel *crypto_ch; /* need to store for crypto completion handling */ - struct vbdev_crypto *crypto_bdev; /* the crypto node struct associated with this IO */ - struct spdk_bdev_io *orig_io; /* the original IO */ - struct spdk_bdev_io *read_io; /* the read IO we issued */ - int8_t bdev_io_status; /* the status we'll report back on the bdev IO */ - bool on_pending_list; - /* Used for the single contiguous buffer that serves as the crypto destination target for writes */ - uint64_t aux_num_blocks; /* num of blocks for the contiguous buffer */ - uint64_t aux_offset_blocks; /* block offset on media */ - void *aux_buf_raw; /* raw buffer that the bdev layer gave us for write buffer */ - struct iovec aux_buf_iov; /* iov representing aligned contig write buffer */ +static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver = + ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; - /* for bdev_io_wait */ - struct spdk_bdev_io_wait_entry bdev_io_wait; - struct spdk_io_channel *ch; -}; +/* Global list of all crypto devices */ +static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER( + g_crypto_devices); +static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER; -/* Called by vbdev_crypto_init_crypto_drivers() to init each discovered crypto device */ -static int -create_vbdev_dev(uint8_t index, uint16_t num_lcores) +static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module; + +void +accel_dpdk_cryptodev_enable(void) { - struct vbdev_dev *device; - uint8_t j, cdev_id, cdrv_id; - struct device_qp *dev_qp; - struct device_qp *tmp_qp; - uint32_t qp_desc_nr; - int rc; - TAILQ_HEAD(device_qps, device_qp) *dev_qp_head; + spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module); +} - device = calloc(1, sizeof(struct vbdev_dev)); +int +accel_dpdk_cryptodev_set_driver(const char *driver_name) +{ + if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) { + g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT; + } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) { + g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; + } else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) { + g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI; + } else { + SPDK_ERRLOG("Unsupported driver %s\n", driver_name); + return -EINVAL; + } + + SPDK_NOTICELOG("Using driver %s\n", driver_name); + + return 0; +} + +const char * +accel_dpdk_cryptodev_get_driver(void) +{ + return g_driver_names[g_dpdk_cryptodev_driver]; +} + +static void +cancel_queued_crypto_ops(struct accel_dpdk_cryptodev_io_channel *crypto_ch, + struct accel_dpdk_cryptodev_task *task) +{ + struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; + struct rte_crypto_op *cancelled_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; + struct accel_dpdk_cryptodev_queued_op *op_to_cancel, *tmp_op; + struct rte_crypto_op *crypto_op; + int num_mbufs = 0, num_dequeued_ops = 0; + + /* Remove all ops from the failed IO. Since we don't know the + * order we have to check them all. */ + TAILQ_FOREACH_SAFE(op_to_cancel, &crypto_ch->queued_cry_ops, link, tmp_op) { + /* Checking if this is our op. One IO contains multiple ops. */ + if (task == op_to_cancel->task) { + crypto_op = op_to_cancel->crypto_op; + TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_cancel, link); + + /* Populating lists for freeing mbufs and ops. 
*/ + mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_src; + if (crypto_op->sym->m_dst) { + mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_dst; + } + cancelled_ops[num_dequeued_ops++] = crypto_op; + } + } + + /* Now bulk free both mbufs and crypto operations. */ + if (num_dequeued_ops > 0) { + rte_mempool_put_bulk(g_crypto_op_mp, (void **)cancelled_ops, + num_dequeued_ops); + assert(num_mbufs > 0); + /* This also releases chained mbufs if any. */ + rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs); + } +} + +static inline uint16_t +accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp) +{ + struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; + struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE]; + struct accel_dpdk_cryptodev_task *task; + uint32_t num_mbufs = 0; + int i; + uint16_t num_dequeued_ops; + + /* Each run of the poller will get just what the device has available + * at the moment we call it, we don't check again after draining the + * first batch. + */ + num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp, + dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE); + /* Check if operation was processed successfully */ + for (i = 0; i < num_dequeued_ops; i++) { + + /* We don't know the order or association of the crypto ops wrt any + * particular task so need to look at each and determine if it's + * the last one for it's task or not. + */ + task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, + g_mbuf_offset, uint64_t *); + assert(task != NULL); + + if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { + SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status); + /* Update the task status to error, we'll still process the + * rest of the crypto ops for this task though so they + * aren't left hanging. + */ + task->is_failed = true; + } + + /* Return the associated src and dst mbufs by collecting them into + * an array that we can use the bulk API to free after the loop. + */ + *RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0; + mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src; + if (dequeued_ops[i]->sym->m_dst) { + mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst; + } + + assert(task->cryop_cnt_remaining > 0); + /* done encrypting, complete the task */ + if (--task->cryop_cnt_remaining == 0) { + /* Complete the IO */ + spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0); + } + } + + /* Now bulk free both mbufs and crypto operations. */ + if (num_dequeued_ops > 0) { + rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops); + assert(num_mbufs > 0); + /* This also releases chained mbufs if any. */ + rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs); + } + + assert(qp->num_enqueued_ops >= num_dequeued_ops); + qp->num_enqueued_ops -= num_dequeued_ops; + + return num_dequeued_ops; +} + +/* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at + * the device. 
Then we need to decide if what we've got so far (including previous poller + * runs) totals up to one or more complete task */ +static int +accel_dpdk_cryptodev_poller(void *args) +{ + struct accel_dpdk_cryptodev_io_channel *crypto_ch = args; + struct accel_dpdk_cryptodev_qp *qp; + struct accel_dpdk_cryptodev_task *task; + struct accel_dpdk_cryptodev_queued_op *op_to_resubmit; + uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0; + uint16_t enqueued; + int i; + + for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) { + qp = crypto_ch->device_qp[i]; + /* Avoid polling "idle" qps since it may affect performance */ + if (qp && qp->num_enqueued_ops) { + num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp); + } + } + + /* Check if there are any queued crypto ops to process */ + while (!TAILQ_EMPTY(&crypto_ch->queued_cry_ops)) { + op_to_resubmit = TAILQ_FIRST(&crypto_ch->queued_cry_ops); + task = op_to_resubmit->task; + qp = op_to_resubmit->qp; + enqueued = rte_cryptodev_enqueue_burst(qp->device->cdev_id, + qp->qp, + &op_to_resubmit->crypto_op, + 1); + if (enqueued == 1) { + TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link); + qp->num_enqueued_ops++; + num_enqueued_ops++; + } else { + if (op_to_resubmit->crypto_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + /* If we couldn't get one, just break and try again later. */ + break; + } else { + /* Something is really wrong with the op. Most probably the + * mbuf is broken or the HW is not able to process the request. + * Fail the IO and remove its ops from the queued ops list. */ + task->is_failed = true; + + cancel_queued_crypto_ops(crypto_ch, task); + + /* Fail the IO if there is nothing left on device. */ + if (--task->cryop_cnt_remaining == 0) { + spdk_accel_task_complete(&task->base, -EFAULT); + } + } + } + } + + return !!(num_dequeued_ops + num_enqueued_ops); +} + +/* Allocate the new mbuf of @remainder size with data pointed by @addr and attach + * it to the @orig_mbuf. */ +static inline int +accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task, + struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder) +{ + uint64_t phys_addr, phys_len, remainder = *_remainder; + struct rte_mbuf *chain_mbuf; + int rc; + + phys_len = remainder; + phys_addr = spdk_vtophys((void *)addr, &phys_len); + if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) { + return -EFAULT; + } + remainder = spdk_min(remainder, phys_len); + rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1); + if (spdk_unlikely(rc)) { + return -ENOMEM; + } + /* Store context in every mbuf as we don't know anything about completion order */ + *RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task; + rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo); + rte_pktmbuf_append(chain_mbuf, remainder); + + /* Chained buffer is released by rte_pktbuf_free_bulk() automagicaly. */ + rte_pktmbuf_chain(orig_mbuf, chain_mbuf); + *_remainder = remainder; + + return 0; +} + +/* Attach data buffer pointed by @addr to @mbuf. Return utilized len of the + * contiguous space that was physically available. 
*/ +static inline uint64_t +accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf, + uint8_t *addr, uint32_t len) +{ + uint64_t phys_addr, phys_len; + + /* Store context in every mbuf as we don't know anything about completion order */ + *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task; + + phys_len = len; + phys_addr = spdk_vtophys((void *)addr, &phys_len); + if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) { + return 0; + } + assert(phys_len <= len); + + /* Set the mbuf elements address and length. */ + rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo); + rte_pktmbuf_append(mbuf, phys_len); + + return phys_len; +} + +static inline struct accel_dpdk_cryptodev_key_handle * +accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch, + struct accel_dpdk_cryptodev_key_priv *key) +{ + struct accel_dpdk_cryptodev_key_handle *key_handle; + + if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { + /* Crypto key is registered on all available devices while io_channel opens CQ/QP on a single device. + * We need to iterate a list of key entries to find a suitable device */ + TAILQ_FOREACH(key_handle, &key->dev_keys, link) { + if (key_handle->device->cdev_id == + crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) { + return key_handle; + } + } + return NULL; + } else { + return TAILQ_FIRST(&key->dev_keys); + } +} + +static inline int +accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs, + struct rte_crypto_op **crypto_ops, int count) +{ + int rc; + + /* Get the number of source mbufs that we need. These will always be 1:1 because we + * don't support chaining. The reason we don't is because of our decision to use + * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the + * op would be > 1 LBA. + */ + rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count); + if (rc) { + SPDK_ERRLOG("Failed to get src_mbufs!\n"); + return -ENOMEM; + } + + /* Get the same amount to describe destination. If crypto operation is inline then we don't just skip it */ + if (dst_mbufs) { + rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count); + if (rc) { + SPDK_ERRLOG("Failed to get dst_mbufs!\n"); + goto err_free_src; + } + } + +#ifdef __clang_analyzer__ + /* silence scan-build false positive */ + SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE, + 0x1000); +#endif + /* Allocate crypto operations. */ + rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp, + RTE_CRYPTO_OP_TYPE_SYMMETRIC, + crypto_ops, count); + if (rc < count) { + SPDK_ERRLOG("Failed to allocate crypto ops!\n"); + goto err_free_ops; + } + + return 0; + +err_free_ops: + if (rc > 0) { + rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc); + } + if (dst_mbufs) { + /* This also releases chained mbufs if any. */ + rte_pktmbuf_free_bulk(dst_mbufs, count); + } +err_free_src: + /* This also releases chained mbufs if any. 
*/ + rte_pktmbuf_free_bulk(src_mbufs, count); + + return -ENOMEM; +} + +static inline int +accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf, + struct accel_dpdk_cryptodev_task *task) +{ + int rc; + uint8_t *buf_addr; + uint64_t phys_len; + uint64_t remainder; + uint64_t buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset); + + buf_addr = sgl->iov->iov_base + sgl->iov_offset; + phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len); + if (spdk_unlikely(phys_len == 0)) { + return -EFAULT; + } + buf_len = spdk_min(buf_len, phys_len); + spdk_iov_sgl_advance(sgl, buf_len); + + /* Handle the case of page boundary. */ + remainder = task->base.block_size - buf_len; + while (remainder) { + buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset); + buf_addr = sgl->iov->iov_base + sgl->iov_offset; + rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len); + if (spdk_unlikely(rc)) { + return rc; + } + spdk_iov_sgl_advance(sgl, buf_len); + remainder -= buf_len; + } + + return 0; +} + +static inline void +accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv) +{ + uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET); + + /* Set the IV - we use the LBA of the crypto_op */ + memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH); + rte_memcpy(iv_ptr, &iv, sizeof(uint64_t)); +} + +static int +accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch, + struct accel_dpdk_cryptodev_task *task) +{ + uint16_t num_enqueued_ops; + uint32_t cryop_cnt; + uint32_t crypto_len = task->base.block_size; + uint64_t total_length = task->base.nbytes; + uint64_t iv_start = task->base.iv; + struct accel_dpdk_cryptodev_queued_op *op_to_queue; + uint32_t crypto_index; + struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; + struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; + struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE]; + struct rte_cryptodev_sym_session *session; + struct accel_dpdk_cryptodev_key_priv *priv; + struct accel_dpdk_cryptodev_key_handle *key_handle; + struct accel_dpdk_cryptodev_qp *qp; + struct accel_dpdk_cryptodev_device *dev; + struct spdk_iov_sgl src, dst = {}; + bool inplace = true; + int rc; + + if (spdk_unlikely(!task->base.crypto_key || + task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) { + return -EINVAL; + } + priv = task->base.crypto_key->priv; + + assert(task->base.nbytes); + assert(task->base.block_size); + assert(task->base.nbytes % task->base.block_size == 0); + assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST); + + if (total_length > ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO) { + return -E2BIG; + } + + cryop_cnt = task->base.nbytes / task->base.block_size; + qp = crypto_ch->device_qp[priv->driver]; + assert(qp); + dev = qp->device; + assert(dev); + + key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv); + if (spdk_unlikely(!key_handle)) { + SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver], + g_cipher_names[priv->cipher]); + return -EINVAL; + } + /* mlx5_pci binds keys to a specific device, we can't use a key with any device */ + assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI); + + if (task->base.op_code == ACCEL_OPC_ENCRYPT) { + session = key_handle->session_encrypt; + } else if 
(task->base.op_code == ACCEL_OPC_DECRYPT) { + session = key_handle->session_decrypt; + } else { + return -EINVAL; + } + + /* Check if crypto operation is inplace: no destination or source == destination */ + if (task->base.s.iovcnt == task->base.d.iovcnt) { + if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) { + inplace = false; + } + } else if (task->base.d.iovcnt != 0) { + inplace = false; + } + + rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs, crypto_ops, + cryop_cnt); + if (rc) { + return rc; + } + /* This value is used in the completion callback to determine when the accel task is complete. + */ + task->cryop_cnt_remaining = cryop_cnt; + + /* As we don't support chaining because of a decision to use LBA as IV, construction + * of crypto operations is straightforward. We build both the op, the mbuf and the + * dst_mbuf in our local arrays by looping through the length of the accel task and + * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each + * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single + * mbuf per crypto operation. + */ + spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0); + if (!inplace) { + spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0); + } + + for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) { + rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task); + if (spdk_unlikely(rc)) { + goto err_free_ops; + } + accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start); + iv_start++; + + /* Set the data to encrypt/decrypt length */ + crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len; + crypto_ops[crypto_index]->sym->cipher.data.offset = 0; + rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session); + + /* link the mbuf to the crypto op. */ + crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index]; + + if (inplace) { + crypto_ops[crypto_index]->sym->m_dst = NULL; + } else { + rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task); + if (spdk_unlikely(rc)) { + goto err_free_ops; + } + crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index]; + } + } + + /* Enqueue everything we've got but limit by the max number of descriptors we + * configured the crypto device for. + */ + num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, spdk_min(cryop_cnt, + dev->qp_desc_nr)); + + qp->num_enqueued_ops += num_enqueued_ops; + /* We were unable to enqueue everything but did get some, so need to decide what + * to do based on the status of the last op. + */ + if (num_enqueued_ops < cryop_cnt) { + switch (crypto_ops[num_enqueued_ops]->status) { + case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED: + /* Queue them up on a linked list to be resubmitted via the poller. */ + for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) { + op_to_queue = (struct accel_dpdk_cryptodev_queued_op *)rte_crypto_op_ctod_offset( + crypto_ops[crypto_index], + uint8_t *, ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET); + op_to_queue->qp = qp; + op_to_queue->crypto_op = crypto_ops[crypto_index]; + op_to_queue->task = task; + TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops, op_to_queue, link); + } + break; + default: + /* For all other statuses, mark task as failed so that the poller will pick + * the failure up for the overall task status. 
+ */ + task->is_failed = true; + if (num_enqueued_ops == 0) { + /* If nothing was enqueued, but the last one wasn't because of + * busy, fail it now as the poller won't know anything about it. + */ + rc = -EINVAL; + goto err_free_ops; + } + break; + } + } + + return 0; + + /* Error cleanup paths. */ +err_free_ops: + if (!inplace) { + /* This also releases chained mbufs if any. */ + rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt); + } + rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt); + /* This also releases chained mbufs if any. */ + rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt); + return rc; +} + +static inline struct accel_dpdk_cryptodev_qp * +accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type) +{ + struct accel_dpdk_cryptodev_device *device, *device_tmp; + struct accel_dpdk_cryptodev_qp *qpair; + + TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) { + if (device->type != type) { + continue; + } + TAILQ_FOREACH(qpair, &device->qpairs, link) { + if (!qpair->in_use) { + qpair->in_use = true; + return qpair; + } + } + } + + return NULL; +} + +/* Helper function for the channel creation callback. + * Returns the number of drivers assigned to the channel */ +static uint32_t +accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch) +{ + struct accel_dpdk_cryptodev_device *device; + struct accel_dpdk_cryptodev_qp *device_qp; + uint32_t num_drivers = 0; + bool qat_found = false; + + pthread_mutex_lock(&g_device_lock); + + TAILQ_FOREACH(device, &g_crypto_devices, link) { + if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) { + /* For some QAT devices, the optimal qp to use is every 32nd as this spreads the + * workload out over the multiple virtual functions in the device. For the devices + * where this isn't the case, it doesn't hurt. + */ + TAILQ_FOREACH(device_qp, &device->qpairs, link) { + if (device_qp->index != g_next_qat_index) { + continue; + } + if (device_qp->in_use == false) { + assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL); + crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp; + device_qp->in_use = true; + g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp; + qat_found = true; + num_drivers++; + break; + } else { + /* if the preferred index is used, skip to the next one in this set. 
*/ + g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp; + } + } + } + } + + /* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI select devices in round-robin manner */ + device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB); + if (device_qp) { + assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL); + crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp; + num_drivers++; + } + + device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI); + if (device_qp) { + assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL); + crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp; + num_drivers++; + } + + pthread_mutex_unlock(&g_device_lock); + + return num_drivers; +} + +static void +_accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf) +{ + struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *) + ctx_buf; + int i; + + pthread_mutex_lock(&g_device_lock); + for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) { + if (crypto_ch->device_qp[i]) { + crypto_ch->device_qp[i]->in_use = false; + } + } + pthread_mutex_unlock(&g_device_lock); + + spdk_poller_unregister(&crypto_ch->poller); +} + +static int +_accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf) +{ + struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *) + ctx_buf; + + crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0); + if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) { + SPDK_ERRLOG("No crypto drivers assigned\n"); + spdk_poller_unregister(&crypto_ch->poller); + return -EINVAL; + } + + /* We use this to queue up crypto ops when the device is busy. */ + TAILQ_INIT(&crypto_ch->queued_cry_ops); + + return 0; +} + +static struct spdk_io_channel * +accel_dpdk_cryptodev_get_io_channel(void) +{ + return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module); +} + +static size_t +accel_dpdk_cryptodev_ctx_size(void) +{ + return sizeof(struct accel_dpdk_cryptodev_task); +} + +static bool +accel_dpdk_cryptodev_supports_opcode(enum accel_opcode opc) +{ + switch (opc) { + case ACCEL_OPC_ENCRYPT: + case ACCEL_OPC_DECRYPT: + return true; + default: + return false; + } +} + +static int +accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task) +{ + struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task, + base); + struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch); + + return accel_dpdk_cryptodev_process_task(ch, task); +} + +/* Dummy function used by DPDK to free ext attached buffers to mbufs, we free them ourselves but + * this callback has to be here. */ +static void +shinfo_free_cb(void *arg1, void *arg2) +{ +} + +static int +accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores) +{ + struct rte_cryptodev_qp_conf qp_conf = { .mp_session = g_session_mp, .mp_session_private = g_session_mp_priv }; + /* Setup queue pairs. 
*/ + struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY }; + struct accel_dpdk_cryptodev_device *device; + uint8_t j, cdev_id, cdrv_id; + struct accel_dpdk_cryptodev_qp *dev_qp; + int rc; + + device = calloc(1, sizeof(*device)); if (!device) { return -ENOMEM; } @@ -235,14 +934,28 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores) cdrv_id = device->cdev_info.driver_id; cdev_id = device->cdev_id = index; - /* QAT_ASYM devices are not supported at this time. */ - if (strcmp(device->cdev_info.driver_name, QAT_ASYM) == 0) { - free(device); - return 0; + if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) { + device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; + device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT; + } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) { + device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; + device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; + } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) { + device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5; + device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI; + } else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) { + /* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */ + rc = 0; + goto err; + } else { + SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n", + cdev_id, device->cdev_info.driver_name); + rc = -EINVAL; + goto err; } /* Before going any further, make sure we have enough resources for this - * device type to function. We need a unique queue pair per core across each + * device type to function. We need a unique queue pair per core accross each * device type to remain lockless.... */ if ((rte_cryptodev_device_count_by_driver(cdrv_id) * @@ -254,12 +967,7 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores) goto err; } - /* Setup queue pairs. */ - struct rte_cryptodev_config conf = { - .nb_queue_pairs = device->cdev_info.max_nb_queue_pairs, - .socket_id = SPDK_ENV_SOCKET_ID_ANY - }; - + conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs; rc = rte_cryptodev_configure(cdev_id, &conf); if (rc < 0) { SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n", @@ -268,38 +976,12 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores) goto err; } - /* Select the right device/qp list based on driver name - * or error if it does not exist. - */ - if (strcmp(device->cdev_info.driver_name, QAT) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_qat; - qp_desc_nr = CRYPTO_QP_DESCRIPTORS; - } else if (strcmp(device->cdev_info.driver_name, AESNI_MB) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_aesni_mb; - qp_desc_nr = CRYPTO_QP_DESCRIPTORS; - } else if (strcmp(device->cdev_info.driver_name, MLX5) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_mlx5; - qp_desc_nr = CRYPTO_QP_DESCRIPTORS_MLX5; - } else { - SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n", - cdev_id, device->cdev_info.driver_name); - rc = -EINVAL; - goto err_qp_setup; - } - - struct rte_cryptodev_qp_conf qp_conf = { - .nb_descriptors = qp_desc_nr, - .mp_session = g_session_mp, -#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) - .mp_session_private = g_session_mp_priv, -#endif - }; - /* Pre-setup all potential qpairs now and assign them in the channel * callback. 
If we were to create them there, we'd have to stop the * entire device affecting all other threads that might be using it * even on other queue pairs. */ + qp_conf.nb_descriptors = device->qp_desc_nr; for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY); if (rc < 0) { @@ -312,15 +994,15 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores) rc = rte_cryptodev_start(cdev_id); if (rc < 0) { - SPDK_ERRLOG("Failed to start device %u: error %d\n", - cdev_id, rc); + SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc); rc = -EINVAL; goto err_dev_start; } + TAILQ_INIT(&device->qpairs); /* Build up lists of device/qp combinations per PMD */ for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { - dev_qp = calloc(1, sizeof(struct device_qp)); + dev_qp = calloc(1, sizeof(*dev_qp)); if (!dev_qp) { rc = -ENOMEM; goto err_qp_alloc; @@ -328,26 +1010,26 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores) dev_qp->device = device; dev_qp->qp = j; dev_qp->in_use = false; - if (strcmp(device->cdev_info.driver_name, QAT) == 0) { - g_qat_total_qp++; + TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link); + if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { + dev_qp->index = g_qat_total_qp++; } - TAILQ_INSERT_TAIL(dev_qp_head, dev_qp, link); } - /* Add to our list of available crypto devices. */ - TAILQ_INSERT_TAIL(&g_vbdev_devs, device, link); + TAILQ_INSERT_TAIL(&g_crypto_devices, device, link); return 0; + err_qp_alloc: - TAILQ_FOREACH_SAFE(dev_qp, dev_qp_head, link, tmp_qp) { + TAILQ_FOREACH(dev_qp, &device->qpairs, link) { if (dev_qp->device->cdev_id != device->cdev_id) { continue; } - TAILQ_REMOVE(dev_qp_head, dev_qp, link); - if (dev_qp_head == (struct device_qps *)&g_device_qp_qat) { + free(dev_qp); + if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { + assert(g_qat_total_qp); g_qat_total_qp--; } - free(dev_qp); } rte_cryptodev_stop(cdev_id); err_dev_start: @@ -360,77 +1042,48 @@ err: } static void -release_vbdev_dev(struct vbdev_dev *device) +accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device) { - struct device_qp *dev_qp; - struct device_qp *tmp_qp; - TAILQ_HEAD(device_qps, device_qp) *dev_qp_head = NULL; + struct accel_dpdk_cryptodev_qp *dev_qp, *tmp; assert(device); - /* Select the right device/qp list based on driver name. */ - if (strcmp(device->cdev_info.driver_name, QAT) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_qat; - } else if (strcmp(device->cdev_info.driver_name, AESNI_MB) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_aesni_mb; - } else if (strcmp(device->cdev_info.driver_name, MLX5) == 0) { - dev_qp_head = (struct device_qps *)&g_device_qp_mlx5; + TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) { + free(dev_qp); } - if (dev_qp_head) { - TAILQ_FOREACH_SAFE(dev_qp, dev_qp_head, link, tmp_qp) { - /* Remove only qps of our device even if the driver names matches. 
*/ - if (dev_qp->device->cdev_id != device->cdev_id) { - continue; - } - TAILQ_REMOVE(dev_qp_head, dev_qp, link); - if (dev_qp_head == (struct device_qps *)&g_device_qp_qat) { - g_qat_total_qp--; - } - free(dev_qp); - } + if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) { + assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs); + g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs; } rte_cryptodev_stop(device->cdev_id); rte_cryptodev_close(device->cdev_id); free(device); } -/* Dummy function used by DPDK to free ext attached buffers to mbufs, we free them ourselves but - * this callback has to be here. */ -static void -shinfo_free_cb(void *arg1, void *arg2) -{ -} - -/* This is called from the module's init function. We setup all crypto devices early on as we are unable - * to easily dynamically configure queue pairs after the drivers are up and running. So, here, we - * configure the max capabilities of each device and assign threads to queue pairs as channels are - * requested. - */ static int -vbdev_crypto_init_crypto_drivers(void) +accel_dpdk_cryptodev_init(void) { uint8_t cdev_count; uint8_t cdev_id; int i, rc; - struct vbdev_dev *device; - struct vbdev_dev *tmp_dev; - struct device_qp *dev_qp; + struct accel_dpdk_cryptodev_device *device, *tmp_dev; unsigned int max_sess_size = 0, sess_size; uint16_t num_lcores = rte_lcore_count(); char aesni_args[32]; - /* Only the first call, via RPC or module init should init the crypto drivers. */ + /* Only the first call via module init should init the crypto drivers. */ if (g_session_mp != NULL) { return 0; } - /* We always init AESNI_MB */ - snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d", AESNI_MB_NUM_QP); - rc = rte_vdev_init(AESNI_MB, aesni_args); + /* We always init ACCEL_DPDK_CRYPTODEV_AESNI_MB */ + snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d", + ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP); + rc = rte_vdev_init(ACCEL_DPDK_CRYPTODEV_AESNI_MB, aesni_args); if (rc) { SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. " "Possibly %s is not supported by DPDK library. " - "Keep going...\n", AESNI_MB, rc, AESNI_MB); + "Keep going...\n", ACCEL_DPDK_CRYPTODEV_AESNI_MB, rc, ACCEL_DPDK_CRYPTODEV_AESNI_MB); } /* If we have no crypto devices, there's no reason to continue. */ @@ -446,13 +1099,9 @@ vbdev_crypto_init_crypto_drivers(void) return -EINVAL; } - /* - * Create global mempools, shared by all devices regardless of type. - */ - + /* Create global mempools, shared by all devices regardless of type */ /* First determine max session size, most pools are shared by all the devices, - * so we need to find the global max sessions size. - */ + * so we need to find the global max sessions size. 
*/ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) { sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id); if (sess_size > max_sess_size) { @@ -460,30 +1109,25 @@ vbdev_crypto_init_crypto_drivers(void) } } -#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0) - g_session_mp_priv = rte_mempool_create("session_mp_priv", NUM_SESSIONS, max_sess_size, - SESS_MEMPOOL_CACHE_SIZE, 0, NULL, NULL, NULL, - NULL, SOCKET_ID_ANY, 0); + g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv", + ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0, + NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); if (g_session_mp_priv == NULL) { SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size); return -ENOMEM; } - /* When session private data mempool allocated, the element size for the session mempool - * should be 0. */ - max_sess_size = 0; -#endif - g_session_mp = rte_cryptodev_sym_session_pool_create( - "session_mp", - NUM_SESSIONS, max_sess_size, SESS_MEMPOOL_CACHE_SIZE, 0, - SOCKET_ID_ANY); + g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp", + ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, 0, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0, + SOCKET_ID_ANY); if (g_session_mp == NULL) { SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size); rc = -ENOMEM; goto error_create_session_mp; } - g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS, POOL_CACHE_SIZE, + g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, + ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE, 0, 0, SPDK_ENV_SOCKET_ID_ANY); if (g_mbuf_mp == NULL) { SPDK_ERRLOG("Cannot create mbuf pool\n"); @@ -492,17 +1136,11 @@ vbdev_crypto_init_crypto_drivers(void) } /* We use per op private data as suggested by DPDK and to store the IV and - * our own struct for queueing ops. - */ - g_crypto_op_mp = rte_crypto_op_pool_create("op_mp", - RTE_CRYPTO_OP_TYPE_SYMMETRIC, - NUM_MBUFS, - POOL_CACHE_SIZE, - (DEFAULT_NUM_XFORMS * - sizeof(struct rte_crypto_sym_xform)) + - IV_LENGTH + QUEUED_OP_LENGTH, - rte_socket_id()); - + * our own struct for queueing ops. */ + g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp", + RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE, + (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) + + ACCEL_DPDK_CRYPTODEV_IV_LENGTH + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH, rte_socket_id()); if (g_crypto_op_mp == NULL) { SPDK_ERRLOG("Cannot create op pool\n"); rc = -ENOMEM; @@ -511,28 +1149,25 @@ vbdev_crypto_init_crypto_drivers(void) /* Init all devices */ for (i = 0; i < cdev_count; i++) { - rc = create_vbdev_dev(i, num_lcores); + rc = accel_dpdk_cryptodev_create(i, num_lcores); if (rc) { goto err; } } - /* Assign index values to the QAT device qp nodes so that we can - * assign them for optimal performance. - */ - i = 0; - TAILQ_FOREACH(dev_qp, &g_device_qp_qat, link) { - dev_qp->index = i++; - } - g_shinfo.free_cb = shinfo_free_cb; + + spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb, + _accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel), + "accel_dpdk_cryptodev"); + return 0; /* Error cleanup paths. 
*/ err: - TAILQ_FOREACH_SAFE(device, &g_vbdev_devs, link, tmp_dev) { - TAILQ_REMOVE(&g_vbdev_devs, device, link); - release_vbdev_dev(device); + TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) { + TAILQ_REMOVE(&g_crypto_devices, device, link); + accel_dpdk_cryptodev_release(device); } rte_mempool_free(g_crypto_op_mp); g_crypto_op_mp = NULL; @@ -550,1289 +1185,16 @@ error_create_session_mp: return rc; } -/* Following an encrypt or decrypt we need to then either write the encrypted data or finish - * the read on decrypted data. Do that here. - */ static void -_crypto_operation_complete(struct spdk_bdev_io *bdev_io) +accel_dpdk_cryptodev_fini_cb(void *io_device) { - struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto, - crypto_bdev); - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch; - struct spdk_bdev_io *free_me = io_ctx->read_io; - int rc = 0; + struct accel_dpdk_cryptodev_device *device, *tmp; - /* Can also be called from the crypto_dev_poller() to fail the stuck re-enqueue ops IO. */ - if (io_ctx->on_pending_list) { - TAILQ_REMOVE(&crypto_ch->pending_cry_ios, bdev_io, module_link); - io_ctx->on_pending_list = false; + TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) { + TAILQ_REMOVE(&g_crypto_devices, device, link); + accel_dpdk_cryptodev_release(device); } - - if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { - - /* Complete the original IO and then free the one that we created - * as a result of issuing an IO via submit_request. - */ - if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) { - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); - } else { - SPDK_ERRLOG("Issue with decryption on bdev_io %p\n", bdev_io); - rc = -EINVAL; - } - spdk_bdev_free_io(free_me); - - } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { - - if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) { - /* Write the encrypted data. */ - rc = spdk_bdev_writev_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, - &io_ctx->aux_buf_iov, 1, io_ctx->aux_offset_blocks, - io_ctx->aux_num_blocks, _complete_internal_write, - bdev_io); - } else { - SPDK_ERRLOG("Issue with encryption on bdev_io %p\n", bdev_io); - rc = -EINVAL; - } - - } else { - SPDK_ERRLOG("Unknown bdev type %u on crypto operation completion\n", - bdev_io->type); - rc = -EINVAL; - } - - if (rc) { - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - } -} - -static void -cancel_queued_crypto_ops(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io) -{ - struct rte_mbuf *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE]; - struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE]; - struct vbdev_crypto_op *op_to_cancel, *tmp_op; - struct rte_crypto_op *crypto_op; - int num_mbufs, num_dequeued_ops; - - /* Remove all ops from the failed IO. Since we don't know the - * order we have to check them all. */ - num_mbufs = 0; - num_dequeued_ops = 0; - TAILQ_FOREACH_SAFE(op_to_cancel, &crypto_ch->queued_cry_ops, link, tmp_op) { - /* Checking if this is our op. One IO contains multiple ops. */ - if (bdev_io == op_to_cancel->bdev_io) { - crypto_op = op_to_cancel->crypto_op; - TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_cancel, link); - - /* Populating lists for freeing mbufs and ops. 
*/ - mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_src; - if (crypto_op->sym->m_dst) { - mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_dst; - } - dequeued_ops[num_dequeued_ops++] = crypto_op; - } - } - - /* Now bulk free both mbufs and crypto operations. */ - if (num_dequeued_ops > 0) { - rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, - num_dequeued_ops); - assert(num_mbufs > 0); - /* This also releases chained mbufs if any. */ - rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs); - } -} - -static int _crypto_operation(struct spdk_bdev_io *bdev_io, - enum rte_crypto_cipher_operation crypto_op, - void *aux_buf); - -/* This is the poller for the crypto device. It uses a single API to dequeue whatever is ready at - * the device. Then we need to decide if what we've got so far (including previous poller - * runs) totals up to one or more complete bdev_ios and if so continue with the bdev_io - * accordingly. This means either completing a read or issuing a new write. - */ -static int -crypto_dev_poller(void *args) -{ - struct crypto_io_channel *crypto_ch = args; - uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id; - int i, num_dequeued_ops, num_enqueued_ops; - struct spdk_bdev_io *bdev_io = NULL; - struct crypto_bdev_io *io_ctx = NULL; - struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE]; - struct rte_mbuf *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE]; - int num_mbufs = 0; - struct vbdev_crypto_op *op_to_resubmit; - - /* Each run of the poller will get just what the device has available - * at the moment we call it, we don't check again after draining the - * first batch. - */ - num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, crypto_ch->device_qp->qp, - dequeued_ops, MAX_DEQUEUE_BURST_SIZE); - - /* Check if operation was processed successfully */ - for (i = 0; i < num_dequeued_ops; i++) { - - /* We don't know the order or association of the crypto ops wrt any - * particular bdev_io so need to look at each and determine if it's - * the last one for it's bdev_io or not. - */ - bdev_io = (struct spdk_bdev_io *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, - uint64_t *); - assert(bdev_io != NULL); - io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - - if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { - SPDK_ERRLOG("error with op %d status %u\n", i, - dequeued_ops[i]->status); - /* Update the bdev status to error, we'll still process the - * rest of the crypto ops for this bdev_io though so they - * aren't left hanging. - */ - io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED; - } - - assert(io_ctx->cryop_cnt_remaining > 0); - - /* Return the associated src and dst mbufs by collecting them into - * an array that we can use the bulk API to free after the loop. - */ - *RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0; - mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src; - if (dequeued_ops[i]->sym->m_dst) { - mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst; - } - - /* done encrypting, complete the bdev_io */ - if (--io_ctx->cryop_cnt_remaining == 0) { - - /* If we're completing this with an outstanding reset we need - * to fail it. - */ - if (crypto_ch->iter) { - io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED; - } - - /* Complete the IO */ - _crypto_operation_complete(bdev_io); - } - } - - /* Now bulk free both mbufs and crypto operations. 
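The dequeue half described above boils down to: drain one burst, look up the owning request through the mbuf dynamic field, and finish the request once its last op has returned. A condensed sketch with hypothetical names (example_task, g_example_mbuf_offset); it is not the module's poller:

#include <stdbool.h>
#include <stdint.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#define EXAMPLE_BURST_SIZE 64			/* illustrative, not the module's constant */

struct example_task {
	uint32_t ops_remaining;			/* one op per LBA-sized block */
	bool failed;
};

static int g_example_mbuf_offset;		/* a registered mbuf dynfield offset */

static void
example_task_complete(struct example_task *task)
{
	/* Completion of the parent request (write of the ciphertext, read completion,
	 * and so on) would happen here. */
	(void)task;
}

static uint16_t
example_poll_qp(uint8_t cdev_id, uint16_t qp)
{
	struct rte_crypto_op *ops[EXAMPLE_BURST_SIZE];
	struct example_task *task;
	uint16_t n, i;

	n = rte_cryptodev_dequeue_burst(cdev_id, qp, ops, EXAMPLE_BURST_SIZE);
	for (i = 0; i < n; i++) {
		/* Every source mbuf carries a pointer back to its task, so the order in
		 * which ops complete, across ops and across tasks, does not matter. */
		task = *RTE_MBUF_DYNFIELD(ops[i]->sym->m_src, g_example_mbuf_offset,
					  struct example_task **);
		if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			task->failed = true;
		}
		if (--task->ops_remaining == 0) {
			example_task_complete(task);
		}
	}
	/* The mbufs and ops themselves would be bulk-freed here, as in the code above. */
	return n;
}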
*/ - if (num_dequeued_ops > 0) { - rte_mempool_put_bulk(g_crypto_op_mp, - (void **)dequeued_ops, - num_dequeued_ops); - assert(num_mbufs > 0); - /* This also releases chained mbufs if any. */ - rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs); - } - - /* Check if there are any pending crypto ops to process */ - while (!TAILQ_EMPTY(&crypto_ch->queued_cry_ops)) { - op_to_resubmit = TAILQ_FIRST(&crypto_ch->queued_cry_ops); - bdev_io = op_to_resubmit->bdev_io; - io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - num_enqueued_ops = rte_cryptodev_enqueue_burst(op_to_resubmit->cdev_id, - op_to_resubmit->qp, - &op_to_resubmit->crypto_op, - 1); - if (num_enqueued_ops == 1) { - /* Make sure we don't put this on twice as one bdev_io is made up - * of many crypto ops. - */ - if (io_ctx->on_pending_list == false) { - TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, bdev_io, module_link); - io_ctx->on_pending_list = true; - } - TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link); - } else { - if (op_to_resubmit->crypto_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - /* If we couldn't get one, just break and try again later. */ - break; - } else { - /* Something is really wrong with the op. Most probably the - * mbuf is broken or the HW is not able to process the request. - * Fail the IO and remove its ops from the queued ops list. */ - io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED; - - cancel_queued_crypto_ops(crypto_ch, bdev_io); - - /* Fail the IO if there is nothing left on device. */ - if (--io_ctx->cryop_cnt_remaining == 0) { - _crypto_operation_complete(bdev_io); - } - } - - } - } - - /* If the channel iter is not NULL, we need to continue to poll - * until the pending list is empty, then we can move on to the - * next channel. - */ - if (crypto_ch->iter && TAILQ_EMPTY(&crypto_ch->pending_cry_ios)) { - SPDK_NOTICELOG("Channel %p has been quiesced.\n", crypto_ch); - spdk_for_each_channel_continue(crypto_ch->iter, 0); - crypto_ch->iter = NULL; - } - - return num_dequeued_ops; -} - -/* Allocate the new mbuf of @remainder size with data pointed by @addr and attach - * it to the @orig_mbuf. */ -static int -mbuf_chain_remainder(struct spdk_bdev_io *bdev_io, struct rte_mbuf *orig_mbuf, - uint8_t *addr, uint32_t remainder) -{ - uint64_t phys_addr, phys_len; - struct rte_mbuf *chain_mbuf; - int rc; - - phys_len = remainder; - phys_addr = spdk_vtophys((void *)addr, &phys_len); - if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len != remainder)) { - return -EFAULT; - } - rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1); - if (spdk_unlikely(rc)) { - return -ENOMEM; - } - /* Store context in every mbuf as we don't know anything about completion order */ - *RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io; - rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, phys_len, &g_shinfo); - rte_pktmbuf_append(chain_mbuf, phys_len); - - /* Chained buffer is released by rte_pktbuf_free_bulk() automagicaly. */ - rte_pktmbuf_chain(orig_mbuf, chain_mbuf); - return 0; -} - -/* Attach data buffer pointed by @addr to @mbuf. Return utilized len of the - * contiguous space that was physically available. 
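The per-mbuf context pointer stored above depends on an mbuf dynamic field whose offset (g_mbuf_offset) is registered once at init time; that registration is not part of this hunk, so the sketch below is an assumption of how it is typically obtained (field name and error handling are illustrative):

#include <stdint.h>
#include <rte_errno.h>
#include <rte_mbuf_dyn.h>

static int g_example_mbuf_offset = -1;

static int
example_register_mbuf_ctx_field(void)
{
	const struct rte_mbuf_dynfield ctx_field = {
		.name = "example_accel_ctx",		/* illustrative name */
		.size = sizeof(uint64_t),		/* room for one pointer */
		.align = __alignof__(uint64_t),
	};

	g_example_mbuf_offset = rte_mbuf_dynfield_register(&ctx_field);
	if (g_example_mbuf_offset < 0) {
		return -rte_errno;			/* e.g. -ENOMEM */
	}
	return 0;
}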
*/ -static uint64_t -mbuf_attach_buf(struct spdk_bdev_io *bdev_io, struct rte_mbuf *mbuf, - uint8_t *addr, uint32_t len) -{ - uint64_t phys_addr, phys_len; - - /* Store context in every mbuf as we don't know anything about completion order */ - *RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io; - - phys_len = len; - phys_addr = spdk_vtophys((void *)addr, &phys_len); - if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) { - return 0; - } - assert(phys_len <= len); - - /* Set the mbuf elements address and length. */ - rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo); - rte_pktmbuf_append(mbuf, phys_len); - - return phys_len; -} - -/* We're either encrypting on the way down or decrypting on the way back. */ -static int -_crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation crypto_op, - void *aux_buf) -{ - uint16_t num_enqueued_ops = 0; - uint32_t cryop_cnt = bdev_io->u.bdev.num_blocks; - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch; - uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id; - uint32_t crypto_len = io_ctx->crypto_bdev->crypto_bdev.blocklen; - uint64_t total_length = bdev_io->u.bdev.num_blocks * crypto_len; - int rc; - uint32_t iov_index = 0; - uint32_t allocated = 0; - uint8_t *current_iov = NULL; - uint64_t total_remaining = 0; - uint64_t current_iov_remaining = 0; - uint32_t crypto_index = 0; - uint32_t en_offset = 0; - struct rte_crypto_op *crypto_ops[MAX_ENQUEUE_ARRAY_SIZE]; - struct rte_mbuf *src_mbufs[MAX_ENQUEUE_ARRAY_SIZE]; - struct rte_mbuf *dst_mbufs[MAX_ENQUEUE_ARRAY_SIZE]; - int burst; - struct vbdev_crypto_op *op_to_queue; - uint64_t alignment = spdk_bdev_get_buf_align(&io_ctx->crypto_bdev->crypto_bdev); - - assert((bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen) <= CRYPTO_MAX_IO); - - /* Get the number of source mbufs that we need. These will always be 1:1 because we - * don't support chaining. The reason we don't is because of our decision to use - * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the - * op would be > 1 LBA. - */ - rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, cryop_cnt); - if (rc) { - SPDK_ERRLOG("Failed to get src_mbufs!\n"); - return -ENOMEM; - } - - /* Get the same amount but these buffers to describe the encrypted data location (dst). */ - if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { - rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, cryop_cnt); - if (rc) { - SPDK_ERRLOG("Failed to get dst_mbufs!\n"); - rc = -ENOMEM; - goto error_get_dst; - } - } - -#ifdef __clang_analyzer__ - /* silence scan-build false positive */ - SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, MAX_ENQUEUE_ARRAY_SIZE, 0x1000); -#endif - /* Allocate crypto operations. */ - allocated = rte_crypto_op_bulk_alloc(g_crypto_op_mp, - RTE_CRYPTO_OP_TYPE_SYMMETRIC, - crypto_ops, cryop_cnt); - if (allocated < cryop_cnt) { - SPDK_ERRLOG("Failed to allocate crypto ops!\n"); - rc = -ENOMEM; - goto error_get_ops; - } - - /* For encryption, we need to prepare a single contiguous buffer as the encryption - * destination, we'll then pass that along for the write after encryption is done. - * This is done to avoiding encrypting the provided write buffer which may be - * undesirable in some use cases. 
- */ - if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { - io_ctx->aux_buf_iov.iov_len = total_length; - io_ctx->aux_buf_raw = aux_buf; - io_ctx->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~(alignment - 1)); - io_ctx->aux_offset_blocks = bdev_io->u.bdev.offset_blocks; - io_ctx->aux_num_blocks = bdev_io->u.bdev.num_blocks; - } - - /* This value is used in the completion callback to determine when the bdev_io is - * complete. - */ - io_ctx->cryop_cnt_remaining = cryop_cnt; - - /* As we don't support chaining because of a decision to use LBA as IV, construction - * of crypto operations is straightforward. We build both the op, the mbuf and the - * dst_mbuf in our local arrays by looping through the length of the bdev IO and - * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each - * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single - * mbuf per crypto operation. - */ - total_remaining = total_length; - current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base; - current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len; - do { - uint8_t *iv_ptr; - uint8_t *buf_addr; - uint64_t phys_len; - uint32_t remainder; - uint64_t op_block_offset; - - phys_len = mbuf_attach_buf(bdev_io, src_mbufs[crypto_index], - current_iov, crypto_len); - if (spdk_unlikely(phys_len == 0)) { - goto error_attach_session; - rc = -EFAULT; - } - - /* Handle the case of page boundary. */ - remainder = crypto_len - phys_len; - if (spdk_unlikely(remainder > 0)) { - rc = mbuf_chain_remainder(bdev_io, src_mbufs[crypto_index], - current_iov + phys_len, remainder); - if (spdk_unlikely(rc)) { - goto error_attach_session; - } - } - - /* Set the IV - we use the LBA of the crypto_op */ - iv_ptr = rte_crypto_op_ctod_offset(crypto_ops[crypto_index], uint8_t *, - IV_OFFSET); - memset(iv_ptr, 0, IV_LENGTH); - op_block_offset = bdev_io->u.bdev.offset_blocks + crypto_index; - rte_memcpy(iv_ptr, &op_block_offset, sizeof(uint64_t)); - - /* Set the data to encrypt/decrypt length */ - crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len; - crypto_ops[crypto_index]->sym->cipher.data.offset = 0; - - /* link the mbuf to the crypto op. */ - crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index]; - - /* For encrypt, point the destination to a buffer we allocate and redirect the bdev_io - * that will be used to process the write on completion to the same buffer. Setting - * up the en_buffer is a little simpler as we know the destination buffer is single IOV. - */ - if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { - buf_addr = io_ctx->aux_buf_iov.iov_base + en_offset; - phys_len = mbuf_attach_buf(bdev_io, dst_mbufs[crypto_index], - buf_addr, crypto_len); - if (spdk_unlikely(phys_len == 0)) { - rc = -EFAULT; - goto error_attach_session; - } - - crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index]; - en_offset += phys_len; - - /* Handle the case of page boundary. 
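The IV handling above is the crux of the one-op-per-block design: each op's IV is just that op's starting LBA copied into a zeroed buffer, so every block gets a unique, reproducible IV without any per-block metadata. Distilled below; the 16-byte length matches the AES block size and is an assumption here:

#include <stdint.h>
#include <string.h>

#define EXAMPLE_IV_LENGTH 16

/* Build the IV for the op covering block (base_lba + block_index). */
static void
example_fill_iv(uint8_t iv[EXAMPLE_IV_LENGTH], uint64_t base_lba, uint32_t block_index)
{
	uint64_t lba = base_lba + block_index;

	memset(iv, 0, EXAMPLE_IV_LENGTH);
	/* Raw host byte order, matching the rte_memcpy() in the code above. */
	memcpy(iv, &lba, sizeof(lba));
}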
*/ - remainder = crypto_len - phys_len; - if (spdk_unlikely(remainder > 0)) { - rc = mbuf_chain_remainder(bdev_io, dst_mbufs[crypto_index], - buf_addr + phys_len, remainder); - if (spdk_unlikely(rc)) { - goto error_attach_session; - } - en_offset += remainder; - } - - /* Attach the crypto session to the operation */ - rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], - io_ctx->crypto_bdev->session_encrypt); - if (rc) { - rc = -EINVAL; - goto error_attach_session; - } - } else { - crypto_ops[crypto_index]->sym->m_dst = NULL; - - /* Attach the crypto session to the operation */ - rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], - io_ctx->crypto_bdev->session_decrypt); - if (rc) { - rc = -EINVAL; - goto error_attach_session; - } - } - - /* Subtract our running totals for the op in progress and the overall bdev io */ - total_remaining -= crypto_len; - current_iov_remaining -= crypto_len; - - /* move our current IOV pointer accordingly. */ - current_iov += crypto_len; - - /* move on to the next crypto operation */ - crypto_index++; - - /* If we're done with this IOV, move to the next one. */ - if (current_iov_remaining == 0 && total_remaining > 0) { - iov_index++; - current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base; - current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len; - } - } while (total_remaining > 0); - - /* Enqueue everything we've got but limit by the max number of descriptors we - * configured the crypto device for. - */ - burst = spdk_min(cryop_cnt, io_ctx->crypto_bdev->qp_desc_nr); - num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, crypto_ch->device_qp->qp, - &crypto_ops[0], - burst); - - /* Add this bdev_io to our outstanding list if any of its crypto ops made it. */ - if (num_enqueued_ops > 0) { - TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, bdev_io, module_link); - io_ctx->on_pending_list = true; - } - /* We were unable to enqueue everything but did get some, so need to decide what - * to do based on the status of the last op. - */ - if (num_enqueued_ops < cryop_cnt) { - switch (crypto_ops[num_enqueued_ops]->status) { - case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED: - /* Queue them up on a linked list to be resubmitted via the poller. */ - for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) { - op_to_queue = (struct vbdev_crypto_op *)rte_crypto_op_ctod_offset(crypto_ops[crypto_index], - uint8_t *, QUEUED_OP_OFFSET); - op_to_queue->cdev_id = cdev_id; - op_to_queue->qp = crypto_ch->device_qp->qp; - op_to_queue->crypto_op = crypto_ops[crypto_index]; - op_to_queue->bdev_io = bdev_io; - TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops, - op_to_queue, - link); - } - break; - default: - /* For all other statuses, set the io_ctx bdev_io status so that - * the poller will pick the failure up for the overall bdev status. - */ - io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED; - if (num_enqueued_ops == 0) { - /* If nothing was enqueued, but the last one wasn't because of - * busy, fail it now as the poller won't know anything about it. - */ - rc = -EINVAL; - goto error_attach_session; - } - break; - } - } - - return rc; - - /* Error cleanup paths. */ -error_attach_session: -error_get_ops: - if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { - /* This also releases chained mbufs if any. */ - rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt); - } - if (allocated > 0) { - rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, - allocated); - } -error_get_dst: - /* This also releases chained mbufs if any. 
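When the enqueue burst above comes up short and the first unsent op reports RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, the leftovers are parked and resubmitted later from the poller. A minimal sketch of that bookkeeping, using a hypothetical record placed in the space reserved in each op's private data (the offset is illustrative):

#include <stdint.h>
#include <sys/queue.h>
#include <rte_crypto.h>

#define EXAMPLE_QUEUED_OP_OFFSET 256		/* illustrative offset into the op's private data */

struct example_queued_op {
	uint8_t cdev_id;
	uint16_t qp;
	struct rte_crypto_op *op;
	void *task;				/* parent request, completed when all its ops finish */
	TAILQ_ENTRY(example_queued_op) link;
};
TAILQ_HEAD(example_queued_op_list, example_queued_op);

static void
example_queue_unsent_ops(struct example_queued_op_list *queued, uint8_t cdev_id, uint16_t qp,
			 void *task, struct rte_crypto_op **ops, uint16_t sent, uint16_t total)
{
	uint16_t i;

	for (i = sent; i < total; i++) {
		/* Reuse space already reserved in the op mempool, so this path needs no
		 * allocation even under memory pressure. */
		struct example_queued_op *q = rte_crypto_op_ctod_offset(ops[i],
						struct example_queued_op *, EXAMPLE_QUEUED_OP_OFFSET);

		q->cdev_id = cdev_id;
		q->qp = qp;
		q->op = ops[i];
		q->task = task;
		TAILQ_INSERT_TAIL(queued, q, link);
	}
}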
*/ - rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt); - return rc; -} - -/* This function is called after all channels have been quiesced following - * a bdev reset. - */ -static void -_ch_quiesce_done(struct spdk_io_channel_iter *i, int status) -{ - struct crypto_bdev_io *io_ctx = spdk_io_channel_iter_get_ctx(i); - - assert(TAILQ_EMPTY(&io_ctx->crypto_ch->pending_cry_ios)); - assert(io_ctx->orig_io != NULL); - - spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_SUCCESS); -} - -/* This function is called per channel to quiesce IOs before completing a - * bdev reset that we received. - */ -static void -_ch_quiesce(struct spdk_io_channel_iter *i) -{ - struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); - struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch); - - crypto_ch->iter = i; - /* When the poller runs, it will see the non-NULL iter and handle - * the quiesce. - */ -} - -/* Completion callback for IO that were issued from this bdev other than read/write. - * They have their own for readability. - */ -static void -_complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) -{ - struct spdk_bdev_io *orig_io = cb_arg; - int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; - - if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) { - struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx; - - assert(orig_io == orig_ctx->orig_io); - - spdk_bdev_free_io(bdev_io); - - spdk_for_each_channel(orig_ctx->crypto_bdev, - _ch_quiesce, - orig_ctx, - _ch_quiesce_done); - return; - } - - spdk_bdev_io_complete(orig_io, status); - spdk_bdev_free_io(bdev_io); -} - -/* Completion callback for writes that were issued from this bdev. */ -static void -_complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) -{ - struct spdk_bdev_io *orig_io = cb_arg; - int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED; - struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx; - - spdk_bdev_io_put_aux_buf(orig_io, orig_ctx->aux_buf_raw); - - spdk_bdev_io_complete(orig_io, status); - spdk_bdev_free_io(bdev_io); -} - -/* Completion callback for reads that were issued from this bdev. */ -static void -_complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) -{ - struct spdk_bdev_io *orig_io = cb_arg; - struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx; - - if (success) { - - /* Save off this bdev_io so it can be freed after decryption. 
*/ - orig_ctx->read_io = bdev_io; - - if (!_crypto_operation(orig_io, RTE_CRYPTO_CIPHER_OP_DECRYPT, NULL)) { - return; - } else { - SPDK_ERRLOG("Failed to decrypt!\n"); - } - } else { - SPDK_ERRLOG("Failed to read prior to decrypting!\n"); - } - - spdk_bdev_io_complete(orig_io, SPDK_BDEV_IO_STATUS_FAILED); - spdk_bdev_free_io(bdev_io); -} - -static void -vbdev_crypto_resubmit_io(void *arg) -{ - struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg; - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - - vbdev_crypto_submit_request(io_ctx->ch, bdev_io); -} - -static void -vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io) -{ - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - int rc; - - io_ctx->bdev_io_wait.bdev = bdev_io->bdev; - io_ctx->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io; - io_ctx->bdev_io_wait.cb_arg = bdev_io; - - rc = spdk_bdev_queue_io_wait(bdev_io->bdev, io_ctx->crypto_ch->base_ch, &io_ctx->bdev_io_wait); - if (rc != 0) { - SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc); - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - } -} - -/* Callback for getting a buf from the bdev pool in the event that the caller passed - * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module - * beneath us before we're done with it. - */ -static void -crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, - bool success) -{ - struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto, - crypto_bdev); - struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch); - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - int rc; - - if (!success) { - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - return; - } - - rc = spdk_bdev_readv_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, bdev_io->u.bdev.iovs, - bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, - bdev_io->u.bdev.num_blocks, _complete_internal_read, - bdev_io); - if (rc != 0) { - if (rc == -ENOMEM) { - SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n"); - io_ctx->ch = ch; - vbdev_crypto_queue_io(bdev_io); - } else { - SPDK_ERRLOG("Failed to submit bdev_io!\n"); - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - } - } -} - -/* For encryption we don't want to encrypt the data in place as the host isn't - * expecting us to mangle its data buffers so we need to encrypt into the bdev - * aux buffer, then we can use that as the source for the disk data transfer. - */ -static void -crypto_write_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, - void *aux_buf) -{ - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - int rc = 0; - - rc = _crypto_operation(bdev_io, RTE_CRYPTO_CIPHER_OP_ENCRYPT, aux_buf); - if (rc != 0) { - spdk_bdev_io_put_aux_buf(bdev_io, aux_buf); - if (rc == -ENOMEM) { - SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n"); - io_ctx->ch = ch; - vbdev_crypto_queue_io(bdev_io); - } else { - SPDK_ERRLOG("Failed to submit bdev_io!\n"); - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - } - } -} - -/* Called when someone submits IO to this crypto vbdev. For IO's not relevant to crypto, - * we're simply passing it on here via SPDK IO calls which in turn allocate another bdev IO - * and call our cpl callback provided below along with the original bdev_io so that we can - * complete it once this IO completes. 
For crypto operations, we'll either encrypt it first - * (writes) then call back into bdev to submit it or we'll submit a read and then catch it - * on the way back for decryption. - */ -static void -vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) -{ - struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto, - crypto_bdev); - struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch); - struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; - int rc = 0; - - memset(io_ctx, 0, sizeof(struct crypto_bdev_io)); - io_ctx->crypto_bdev = crypto_bdev; - io_ctx->crypto_ch = crypto_ch; - io_ctx->orig_io = bdev_io; - io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; - - switch (bdev_io->type) { - case SPDK_BDEV_IO_TYPE_READ: - spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb, - bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); - break; - case SPDK_BDEV_IO_TYPE_WRITE: - /* Tell the bdev layer that we need an aux buf in addition to the data - * buf already associated with the bdev. - */ - spdk_bdev_io_get_aux_buf(bdev_io, crypto_write_get_buf_cb); - break; - case SPDK_BDEV_IO_TYPE_UNMAP: - rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, - bdev_io->u.bdev.offset_blocks, - bdev_io->u.bdev.num_blocks, - _complete_internal_io, bdev_io); - break; - case SPDK_BDEV_IO_TYPE_FLUSH: - rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, - bdev_io->u.bdev.offset_blocks, - bdev_io->u.bdev.num_blocks, - _complete_internal_io, bdev_io); - break; - case SPDK_BDEV_IO_TYPE_RESET: - rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch, - _complete_internal_io, bdev_io); - break; - case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: - default: - SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type); - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - return; - } - - if (rc != 0) { - if (rc == -ENOMEM) { - SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n"); - io_ctx->ch = ch; - vbdev_crypto_queue_io(bdev_io); - } else { - SPDK_ERRLOG("Failed to submit bdev_io!\n"); - spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); - } - } -} - -/* We'll just call the base bdev and let it answer except for WZ command which - * we always say we don't support so that the bdev layer will actually send us - * real writes that we can encrypt. - */ -static bool -vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type) -{ - struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx; - - switch (io_type) { - case SPDK_BDEV_IO_TYPE_WRITE: - case SPDK_BDEV_IO_TYPE_UNMAP: - case SPDK_BDEV_IO_TYPE_RESET: - case SPDK_BDEV_IO_TYPE_READ: - case SPDK_BDEV_IO_TYPE_FLUSH: - return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type); - case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: - /* Force the bdev layer to issue actual writes of zeroes so we can - * encrypt them as regular writes. 
- */ - default: - return false; - } -} - -static struct vbdev_dev * -_vdev_dev_get(struct vbdev_crypto *vbdev) -{ - struct vbdev_dev *device; - - TAILQ_FOREACH(device, &g_vbdev_devs, link) { - if (strcmp(device->cdev_info.driver_name, vbdev->opts->drv_name) == 0) { - return device; - } - } - return NULL; -} - -static void -_cryptodev_sym_session_free(struct vbdev_crypto *vbdev, void *session) -{ -#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) - struct vbdev_dev *device = _vdev_dev_get(vbdev); - - assert(device != NULL); - - rte_cryptodev_sym_session_free(device->cdev_id, session); -#else - rte_cryptodev_sym_session_free(session); -#endif -} - -static void * -_cryptodev_sym_session_create(struct vbdev_crypto *vbdev, struct rte_crypto_sym_xform *xforms) -{ - void *session; - struct vbdev_dev *device; - - device = _vdev_dev_get(vbdev); - if (!device) { - SPDK_ERRLOG("Failed to match crypto device driver to crypto vbdev.\n"); - return NULL; - } - -#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) - session = rte_cryptodev_sym_session_create(device->cdev_id, xforms, g_session_mp); -#else - session = rte_cryptodev_sym_session_create(g_session_mp); - if (!session) { - return NULL; - } - - if (rte_cryptodev_sym_session_init(device->cdev_id, session, xforms, g_session_mp_priv) < 0) { - _cryptodev_sym_session_free(vbdev, session); - return NULL; - } -#endif - - return session; -} - -/* Callback for unregistering the IO device. */ -static void -_device_unregister_cb(void *io_device) -{ - struct vbdev_crypto *crypto_bdev = io_device; - - /* Done with this crypto_bdev. */ - _cryptodev_sym_session_free(crypto_bdev, crypto_bdev->session_decrypt); - _cryptodev_sym_session_free(crypto_bdev, crypto_bdev->session_encrypt); - crypto_bdev->opts = NULL; - free(crypto_bdev->crypto_bdev.name); - free(crypto_bdev); -} - -/* Wrapper for the bdev close operation. */ -static void -_vbdev_crypto_destruct(void *ctx) -{ - struct spdk_bdev_desc *desc = ctx; - - spdk_bdev_close(desc); -} - -/* Called after we've unregistered following a hot remove callback. - * Our finish entry point will be called next. - */ -static int -vbdev_crypto_destruct(void *ctx) -{ - struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx; - - /* Remove this device from the internal list */ - TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link); - - /* Unclaim the underlying bdev. */ - spdk_bdev_module_release_bdev(crypto_bdev->base_bdev); - - /* Close the underlying bdev on its same opened thread. */ - if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) { - spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc); - } else { - spdk_bdev_close(crypto_bdev->base_desc); - } - - /* Unregister the io_device. */ - spdk_io_device_unregister(crypto_bdev, _device_unregister_cb); - - g_number_of_claimed_volumes--; - - return 0; -} - -/* We supplied this as an entry point for upper layers who want to communicate to this - * bdev. This is how they get a channel. We are passed the same context we provided when - * we created our crypto vbdev in examine() which, for this bdev, is the address of one of - * our context nodes. From here we'll ask the SPDK channel code to fill out our channel - * struct and we'll keep it in our crypto node. 
- */ -static struct spdk_io_channel * -vbdev_crypto_get_io_channel(void *ctx) -{ - struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx; - - /* The IO channel code will allocate a channel for us which consists of - * the SPDK channel structure plus the size of our crypto_io_channel struct - * that we passed in when we registered our IO device. It will then call - * our channel create callback to populate any elements that we need to - * update. - */ - return spdk_get_io_channel(crypto_bdev); -} - -/* This is the output for bdev_get_bdevs() for this vbdev */ -static int -vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w) -{ - struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx; - char *hexkey = NULL, *hexkey2 = NULL; - int rc = 0; - - hexkey = spdk_hexlify(crypto_bdev->opts->key, - crypto_bdev->opts->key_size); - if (!hexkey) { - return -ENOMEM; - } - - if (crypto_bdev->opts->key2) { - hexkey2 = spdk_hexlify(crypto_bdev->opts->key2, - crypto_bdev->opts->key2_size); - if (!hexkey2) { - rc = -ENOMEM; - goto out_err; - } - } - - spdk_json_write_name(w, "crypto"); - spdk_json_write_object_begin(w); - spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev)); - spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev)); - spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->opts->drv_name); - spdk_json_write_named_string(w, "key", hexkey); - if (hexkey2) { - spdk_json_write_named_string(w, "key2", hexkey2); - } - spdk_json_write_named_string(w, "cipher", crypto_bdev->opts->cipher); - spdk_json_write_object_end(w); -out_err: - if (hexkey) { - memset(hexkey, 0, strlen(hexkey)); - free(hexkey); - } - if (hexkey2) { - memset(hexkey2, 0, strlen(hexkey2)); - free(hexkey2); - } - return rc; -} - -static int -vbdev_crypto_config_json(struct spdk_json_write_ctx *w) -{ - struct vbdev_crypto *crypto_bdev; - - TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) { - char *hexkey = NULL, *hexkey2 = NULL; - - hexkey = spdk_hexlify(crypto_bdev->opts->key, - crypto_bdev->opts->key_size); - if (!hexkey) { - return -ENOMEM; - } - - if (crypto_bdev->opts->key2) { - hexkey2 = spdk_hexlify(crypto_bdev->opts->key2, - crypto_bdev->opts->key2_size); - if (!hexkey2) { - memset(hexkey, 0, strlen(hexkey)); - free(hexkey); - return -ENOMEM; - } - } - - spdk_json_write_object_begin(w); - spdk_json_write_named_string(w, "method", "bdev_crypto_create"); - spdk_json_write_named_object_begin(w, "params"); - spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev)); - spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev)); - spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->opts->drv_name); - spdk_json_write_named_string(w, "key", hexkey); - if (hexkey2) { - spdk_json_write_named_string(w, "key2", hexkey2); - } - spdk_json_write_named_string(w, "cipher", crypto_bdev->opts->cipher); - spdk_json_write_object_end(w); - spdk_json_write_object_end(w); - - if (hexkey) { - memset(hexkey, 0, strlen(hexkey)); - free(hexkey); - } - if (hexkey2) { - memset(hexkey2, 0, strlen(hexkey2)); - free(hexkey2); - } - } - return 0; -} - -/* Helper function for the channel creation callback. 
*/ -static void -_assign_device_qp(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp, - struct crypto_io_channel *crypto_ch) -{ - pthread_mutex_lock(&g_device_qp_lock); - if (strcmp(crypto_bdev->opts->drv_name, QAT) == 0) { - /* For some QAT devices, the optimal qp to use is every 32nd as this spreads the - * workload out over the multiple virtual functions in the device. For the devices - * where this isn't the case, it doesn't hurt. - */ - TAILQ_FOREACH(device_qp, &g_device_qp_qat, link) { - if (device_qp->index != g_next_qat_index) { - continue; - } - if (device_qp->in_use == false) { - crypto_ch->device_qp = device_qp; - device_qp->in_use = true; - g_next_qat_index = (g_next_qat_index + QAT_VF_SPREAD) % g_qat_total_qp; - break; - } else { - /* if the preferred index is used, skip to the next one in this set. */ - g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp; - } - } - } else if (strcmp(crypto_bdev->opts->drv_name, AESNI_MB) == 0) { - TAILQ_FOREACH(device_qp, &g_device_qp_aesni_mb, link) { - if (device_qp->in_use == false) { - crypto_ch->device_qp = device_qp; - device_qp->in_use = true; - break; - } - } - } else if (strcmp(crypto_bdev->opts->drv_name, MLX5) == 0) { - TAILQ_FOREACH(device_qp, &g_device_qp_mlx5, link) { - if (device_qp->in_use == false) { - crypto_ch->device_qp = device_qp; - device_qp->in_use = true; - break; - } - } - } - pthread_mutex_unlock(&g_device_qp_lock); -} - -/* We provide this callback for the SPDK channel code to create a channel using - * the channel struct we provided in our module get_io_channel() entry point. Here - * we get and save off an underlying base channel of the device below us so that - * we can communicate with the base bdev on a per channel basis. We also register the - * poller used to complete crypto operations from the device. - */ -static int -crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf) -{ - struct crypto_io_channel *crypto_ch = ctx_buf; - struct vbdev_crypto *crypto_bdev = io_device; - struct device_qp *device_qp = NULL; - - crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc); - crypto_ch->poller = SPDK_POLLER_REGISTER(crypto_dev_poller, crypto_ch, 0); - crypto_ch->device_qp = NULL; - - /* Assign a device/qp combination that is unique per channel per PMD. */ - _assign_device_qp(crypto_bdev, device_qp, crypto_ch); - assert(crypto_ch->device_qp); - - /* We use this queue to track outstanding IO in our layer. */ - TAILQ_INIT(&crypto_ch->pending_cry_ios); - - /* We use this to queue up crypto ops when the device is busy. */ - TAILQ_INIT(&crypto_ch->queued_cry_ops); - - return 0; -} - -/* We provide this callback for the SPDK channel code to destroy a channel - * created with our create callback. We just need to undo anything we did - * when we created. - */ -static void -crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf) -{ - struct crypto_io_channel *crypto_ch = ctx_buf; - - pthread_mutex_lock(&g_device_qp_lock); - crypto_ch->device_qp->in_use = false; - pthread_mutex_unlock(&g_device_qp_lock); - - spdk_poller_unregister(&crypto_ch->poller); - spdk_put_io_channel(crypto_ch->base_ch); -} - -/* Create the association from the bdev and vbdev name and insert - * on the global list. 
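The QAT branch above deliberately hands out every 32nd queue pair first so that channels created back to back land on different virtual functions; only when the preferred slot is taken does it fall back to stepping by one. The index arithmetic reduces to the helper below (32 comes from the comment above and is otherwise an assumption):

#include <stdint.h>

#define EXAMPLE_QAT_VF_SPREAD 32

/* Preferred index for the next channel, given the one just handed out.
 * Callers step by 1 from here when the preferred qp is already in use,
 * exactly as the loop above does. */
static uint32_t
example_next_qat_index(uint32_t current_index, uint32_t total_qp)
{
	return (current_index + EXAMPLE_QAT_VF_SPREAD) % total_qp;
}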
*/ -static int -vbdev_crypto_insert_name(struct vbdev_crypto_opts *opts, struct bdev_names **out) -{ - struct bdev_names *name; - bool found = false; - int j; - - assert(opts); - assert(out); - - TAILQ_FOREACH(name, &g_bdev_names, link) { - if (strcmp(opts->vbdev_name, name->opts->vbdev_name) == 0) { - SPDK_ERRLOG("Crypto bdev %s already exists\n", opts->vbdev_name); - return -EEXIST; - } - } - - for (j = 0; j < MAX_NUM_DRV_TYPES ; j++) { - if (strcmp(opts->drv_name, g_driver_names[j]) == 0) { - found = true; - break; - } - } - if (!found) { - SPDK_ERRLOG("Crypto PMD type %s is not supported.\n", opts->drv_name); - return -EINVAL; - } - - name = calloc(1, sizeof(struct bdev_names)); - if (!name) { - SPDK_ERRLOG("Failed to allocate memory for bdev_names.\n"); - return -ENOMEM; - } - - name->opts = opts; - TAILQ_INSERT_TAIL(&g_bdev_names, name, link); - *out = name; - - return 0; -} - -void -free_crypto_opts(struct vbdev_crypto_opts *opts) -{ - free(opts->bdev_name); - free(opts->vbdev_name); - free(opts->drv_name); - if (opts->xts_key) { - memset(opts->xts_key, 0, - opts->key_size + opts->key2_size); - free(opts->xts_key); - } - memset(opts->key, 0, opts->key_size); - free(opts->key); - opts->key_size = 0; - if (opts->key2) { - memset(opts->key2, 0, opts->key2_size); - free(opts->key2); - } - opts->key2_size = 0; - free(opts); -} - -static void -vbdev_crypto_delete_name(struct bdev_names *name) -{ - TAILQ_REMOVE(&g_bdev_names, name, link); - if (name->opts) { - free_crypto_opts(name->opts); - name->opts = NULL; - } - free(name); -} - -/* RPC entry point for crypto creation. */ -int -create_crypto_disk(struct vbdev_crypto_opts *opts) -{ - struct bdev_names *name = NULL; - int rc; - - rc = vbdev_crypto_insert_name(opts, &name); - if (rc) { - return rc; - } - - rc = vbdev_crypto_claim(opts->bdev_name); - if (rc == -ENODEV) { - SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n"); - rc = 0; - } - - if (rc) { - assert(name != NULL); - /* In case of error we let the caller function to deallocate @opts - * since it is its responsibiltiy. Setting name->opts = NULL let's - * vbdev_crypto_delete_name() know it does not have to do anything - * about @opts. - */ - name->opts = NULL; - vbdev_crypto_delete_name(name); - } - return rc; -} - -/* Called at driver init time, parses config file to prepare for examine calls, - * also fully initializes the crypto drivers. - */ -static int -vbdev_crypto_init(void) -{ - int rc = 0; - - /* Fully configure both SW and HW drivers. */ - rc = vbdev_crypto_init_crypto_drivers(); - if (rc) { - SPDK_ERRLOG("Error setting up crypto devices\n"); - } - - return rc; -} - -/* Called when the entire module is being torn down. 
*/ -static void -vbdev_crypto_finish(void) -{ - struct bdev_names *name; - struct vbdev_dev *device; - - while ((name = TAILQ_FIRST(&g_bdev_names))) { - vbdev_crypto_delete_name(name); - } - - while ((device = TAILQ_FIRST(&g_vbdev_devs))) { - TAILQ_REMOVE(&g_vbdev_devs, device, link); - release_vbdev_dev(device); - } - rte_vdev_uninit(AESNI_MB); - - /* These are removed in release_vbdev_dev() */ - assert(TAILQ_EMPTY(&g_device_qp_qat)); - assert(TAILQ_EMPTY(&g_device_qp_aesni_mb)); - assert(TAILQ_EMPTY(&g_device_qp_mlx5)); + rte_vdev_uninit(ACCEL_DPDK_CRYPTODEV_AESNI_MB); rte_mempool_free(g_crypto_op_mp); rte_mempool_free(g_mbuf_mp); @@ -1840,289 +1202,295 @@ vbdev_crypto_finish(void) if (g_session_mp_priv != NULL) { rte_mempool_free(g_session_mp_priv); } + + spdk_accel_module_finish(); } -/* During init we'll be asked how much memory we'd like passed to us - * in bev_io structures as context. Here's where we specify how - * much context we want per IO. - */ -static int -vbdev_crypto_get_ctx_size(void) -{ - return sizeof(struct crypto_bdev_io); -} - +/* Called when the entire module is being torn down. */ static void -vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find) +accel_dpdk_cryptodev_fini(void *ctx) { - struct vbdev_crypto *crypto_bdev, *tmp; - - TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) { - if (bdev_find == crypto_bdev->base_bdev) { - spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL); - } - } + spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb); } -/* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */ -static void -vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, - void *event_ctx) -{ - switch (type) { - case SPDK_BDEV_EVENT_REMOVE: - vbdev_crypto_base_bdev_hotremove_cb(bdev); - break; - default: - SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); - break; - } -} - -static void -vbdev_crypto_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w) -{ - /* No config per bdev needed */ -} - -/* When we register our bdev this is how we specify our entry points. */ -static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = { - .destruct = vbdev_crypto_destruct, - .submit_request = vbdev_crypto_submit_request, - .io_type_supported = vbdev_crypto_io_type_supported, - .get_io_channel = vbdev_crypto_get_io_channel, - .dump_info_json = vbdev_crypto_dump_info_json, - .write_config_json = vbdev_crypto_write_config_json -}; - -static struct spdk_bdev_module crypto_if = { - .name = "crypto", - .module_init = vbdev_crypto_init, - .get_ctx_size = vbdev_crypto_get_ctx_size, - .examine_config = vbdev_crypto_examine, - .module_fini = vbdev_crypto_finish, - .config_json = vbdev_crypto_config_json -}; - -SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if) - static int -vbdev_crypto_claim(const char *bdev_name) +accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key, + struct accel_dpdk_cryptodev_key_handle *key_handle) { - struct bdev_names *name; - struct vbdev_crypto *vbdev; - struct spdk_bdev *bdev; - uint8_t key_size; - int rc = 0; - - if (g_number_of_claimed_volumes >= MAX_CRYPTO_VOLUMES) { - SPDK_DEBUGLOG(vbdev_crypto, "Reached max number of claimed volumes\n"); - return -EINVAL; - } - g_number_of_claimed_volumes++; - - /* Check our list of names from config versus this bdev and if - * there's a match, create the crypto_bdev & bdev accordingly. 
- */ - TAILQ_FOREACH(name, &g_bdev_names, link) { - if (strcmp(name->opts->bdev_name, bdev_name) != 0) { - continue; - } - SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name); - - vbdev = calloc(1, sizeof(struct vbdev_crypto)); - if (!vbdev) { - SPDK_ERRLOG("Failed to allocate memory for crypto_bdev.\n"); - rc = -ENOMEM; - goto error_vbdev_alloc; - } - vbdev->crypto_bdev.product_name = "crypto"; - - vbdev->crypto_bdev.name = strdup(name->opts->vbdev_name); - if (!vbdev->crypto_bdev.name) { - SPDK_ERRLOG("Failed to allocate memory for crypto_bdev name.\n"); - rc = -ENOMEM; - goto error_bdev_name; - } - - rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb, - NULL, &vbdev->base_desc); - if (rc) { - if (rc != -ENODEV) { - SPDK_ERRLOG("Failed to open bdev %s: error %d\n", bdev_name, rc); - } - goto error_open; - } - - bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc); - vbdev->base_bdev = bdev; - - if (strcmp(name->opts->drv_name, MLX5) == 0) { - vbdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS_MLX5; - } else { - vbdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS; - } - - vbdev->crypto_bdev.write_cache = bdev->write_cache; - if (strcmp(name->opts->drv_name, QAT) == 0) { - vbdev->crypto_bdev.required_alignment = - spdk_max(spdk_u32log2(bdev->blocklen), bdev->required_alignment); - SPDK_NOTICELOG("QAT in use: Required alignment set to %u\n", - vbdev->crypto_bdev.required_alignment); - SPDK_NOTICELOG("QAT using cipher: %s\n", name->opts->cipher); - } else if (strcmp(name->opts->drv_name, MLX5) == 0) { - vbdev->crypto_bdev.required_alignment = bdev->required_alignment; - SPDK_NOTICELOG("MLX5 using cipher: %s\n", name->opts->cipher); - } else { - vbdev->crypto_bdev.required_alignment = bdev->required_alignment; - SPDK_NOTICELOG("AESNI_MB using cipher: %s\n", name->opts->cipher); - } - vbdev->cipher_xform.cipher.iv.length = IV_LENGTH; - - /* Note: CRYPTO_MAX_IO is in units of bytes, optimal_io_boundary is - * in units of blocks. - */ - if (bdev->optimal_io_boundary > 0) { - vbdev->crypto_bdev.optimal_io_boundary = - spdk_min((CRYPTO_MAX_IO / bdev->blocklen), bdev->optimal_io_boundary); - } else { - vbdev->crypto_bdev.optimal_io_boundary = (CRYPTO_MAX_IO / bdev->blocklen); - } - vbdev->crypto_bdev.split_on_optimal_io_boundary = true; - vbdev->crypto_bdev.blocklen = bdev->blocklen; - vbdev->crypto_bdev.blockcnt = bdev->blockcnt; - - /* This is the context that is passed to us when the bdev - * layer calls in so we'll save our crypto_bdev node here. - */ - vbdev->crypto_bdev.ctxt = vbdev; - vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table; - vbdev->crypto_bdev.module = &crypto_if; - - /* Assign crypto opts from the name. The pointer is valid up to the point - * the module is unloaded and all names removed from the list. */ - vbdev->opts = name->opts; - - TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link); - - spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb, - sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name); - - /* Save the thread where the base device is opened */ - vbdev->thread = spdk_get_thread(); - - rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module); - if (rc) { - SPDK_ERRLOG("Failed to claim bdev %s\n", spdk_bdev_get_name(bdev)); - goto error_claim; - } - - /* Init our per vbdev xform with the desired cipher options. 
*/ - vbdev->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; - vbdev->cipher_xform.cipher.iv.offset = IV_OFFSET; - if (strcmp(vbdev->opts->cipher, AES_CBC) == 0) { - vbdev->cipher_xform.cipher.key.data = vbdev->opts->key; - vbdev->cipher_xform.cipher.key.length = vbdev->opts->key_size; - vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; - } else if (strcmp(vbdev->opts->cipher, AES_XTS) == 0) { - key_size = vbdev->opts->key_size + vbdev->opts->key2_size; - vbdev->cipher_xform.cipher.key.data = vbdev->opts->xts_key; - vbdev->cipher_xform.cipher.key.length = key_size; - vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS; - } else { - SPDK_ERRLOG("Invalid cipher name %s.\n", vbdev->opts->cipher); - rc = -EINVAL; - goto error_session_de_create; - } - vbdev->cipher_xform.cipher.iv.length = IV_LENGTH; - - vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; - vbdev->session_encrypt = _cryptodev_sym_session_create(vbdev, &vbdev->cipher_xform); - if (NULL == vbdev->session_encrypt) { - SPDK_ERRLOG("Failed to create encrypt crypto session.\n"); - rc = -EINVAL; - goto error_session_en_create; - } - - vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; - vbdev->session_decrypt = _cryptodev_sym_session_create(vbdev, &vbdev->cipher_xform); - if (NULL == vbdev->session_decrypt) { - SPDK_ERRLOG("Failed to create decrypt crypto session.\n"); - rc = -EINVAL; - goto error_session_de_create; - } - - rc = spdk_bdev_register(&vbdev->crypto_bdev); - if (rc < 0) { - SPDK_ERRLOG("Failed to register vbdev: error %d\n", rc); - rc = -EINVAL; - goto error_bdev_register; - } - SPDK_DEBUGLOG(vbdev_crypto, "Registered io_device and virtual bdev for: %s\n", - vbdev->opts->vbdev_name); - break; - } - - return rc; - - /* Error cleanup paths. */ -error_bdev_register: - _cryptodev_sym_session_free(vbdev, vbdev->session_decrypt); -error_session_de_create: - _cryptodev_sym_session_free(vbdev, vbdev->session_encrypt); -error_session_en_create: - spdk_bdev_module_release_bdev(vbdev->base_bdev); -error_claim: - TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link); - spdk_io_device_unregister(vbdev, NULL); - spdk_bdev_close(vbdev->base_desc); -error_open: - free(vbdev->crypto_bdev.name); -error_bdev_name: - free(vbdev); -error_vbdev_alloc: - g_number_of_claimed_volumes--; - return rc; -} - -/* RPC entry for deleting a crypto vbdev. */ -void -delete_crypto_disk(const char *bdev_name, spdk_delete_crypto_complete cb_fn, - void *cb_arg) -{ - struct bdev_names *name; + struct accel_dpdk_cryptodev_key_priv *priv = key->priv; int rc; - /* Some cleanup happens in the destruct callback. */ - rc = spdk_bdev_unregister_by_name(bdev_name, &crypto_if, cb_fn, cb_arg); - if (rc == 0) { - /* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the - * vbdev does not get re-created if the same bdev is constructed at some other time, - * unless the underlying bdev was hot-removed. 
- */ - TAILQ_FOREACH(name, &g_bdev_names, link) { - if (strcmp(name->opts->vbdev_name, bdev_name) == 0) { - vbdev_crypto_delete_name(name); - break; - } - } - } else { - cb_fn(cb_arg, rc); + key_handle->session_encrypt = rte_cryptodev_sym_session_create(g_session_mp); + if (!key_handle->session_encrypt) { + SPDK_ERRLOG("Failed to create encrypt crypto session.\n"); + return -EINVAL; } + key_handle->session_decrypt = rte_cryptodev_sym_session_create(g_session_mp); + if (!key_handle->session_decrypt) { + SPDK_ERRLOG("Failed to create decrypt crypto session.\n"); + rc = -EINVAL; + goto err_ses_encrypt; + } + key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET; + key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH; + + switch (priv->cipher) { + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC: + key_handle->cipher_xform.cipher.key.data = key->key; + key_handle->cipher_xform.cipher.key.length = key->key_size; + key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; + break; + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS: + key_handle->cipher_xform.cipher.key.data = priv->xts_key; + key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size; + key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS; + break; + default: + SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher); + rc = -EINVAL; + goto err_ses_decrypt; + } + + key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + rc = rte_cryptodev_sym_session_init(key_handle->device->cdev_id, key_handle->session_encrypt, + &key_handle->cipher_xform, + g_session_mp_priv ? g_session_mp_priv : g_session_mp); + if (rc < 0) { + SPDK_ERRLOG("Failed to init encrypt session: error %d\n", rc); + rc = -EINVAL; + goto err_ses_decrypt; + } + + key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; + rc = rte_cryptodev_sym_session_init(key_handle->device->cdev_id, key_handle->session_decrypt, + &key_handle->cipher_xform, + g_session_mp_priv ? g_session_mp_priv : g_session_mp); + if (rc < 0) { + SPDK_ERRLOG("Failed to init decrypt session: error %d\n", rc); + rc = -EINVAL; + goto err_ses_decrypt; + } + + return 0; + +err_ses_decrypt: + rte_cryptodev_sym_session_free(key_handle->session_decrypt); +err_ses_encrypt: + rte_cryptodev_sym_session_free(key_handle->session_encrypt); + + return rc; } -/* Because we specified this function in our crypto bdev function table when we - * registered our crypto bdev, we'll get this call anytime a new bdev shows up. - * Here we need to decide if we care about it and if so what to do. We - * parsed the config file at init so we check the new bdev against the list - * we built up at that time and if the user configured us to attach to this - * bdev, here's where we do it. 
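The new session setup above uses the two-call rte_cryptodev_sym_session_create() plus rte_cryptodev_sym_session_init() flow; as the removed vbdev code earlier in this patch shows, DPDK 22.11 folds these into a single call that also takes the device id and xform. A hedged sketch of a helper that papers over the difference (illustrative only, not code from this patch):

#include <rte_version.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_mempool.h>

static void *
example_sym_session_create(uint8_t cdev_id, struct rte_crypto_sym_xform *xform,
			   struct rte_mempool *sess_mp, struct rte_mempool *sess_mp_priv)
{
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
	/* 22.11+: one call creates and initializes the session for this device. */
	(void)sess_mp_priv;
	return rte_cryptodev_sym_session_create(cdev_id, xform, sess_mp);
#else
	void *session = rte_cryptodev_sym_session_create(sess_mp);

	if (session == NULL) {
		return NULL;
	}
	if (rte_cryptodev_sym_session_init(cdev_id, session, xform,
					   sess_mp_priv ? sess_mp_priv : sess_mp) < 0) {
		rte_cryptodev_sym_session_free(session);
		return NULL;
	}
	return session;
#endif
}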
- */ -static void -vbdev_crypto_examine(struct spdk_bdev *bdev) +static int +accel_dpdk_cryptodev_validate_parameters(enum accel_dpdk_cryptodev_driver_type driver, + enum accel_dpdk_crypto_dev_cipher_type cipher, struct spdk_accel_crypto_key *key) { - vbdev_crypto_claim(spdk_bdev_get_name(bdev)); - spdk_bdev_module_examine_done(&crypto_if); + /* Check that all required parameters exist */ + switch (cipher) { + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC: + if (!key->key || !key->key_size) { + SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC requires a key\n"); + return -1; + } + if (key->key2 || key->key2_size) { + SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC doesn't use key2\n"); + return -1; + } + break; + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS: + if (!key->key || !key->key_size || !key->key2 || !key->key2_size) { + SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_XTS requires both key and key2\n"); + return -1; + } + break; + default: + return -1; + } + + /* Check driver/cipher combinations and key lengths */ + switch (cipher) { + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC: + if (driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { + SPDK_ERRLOG("Driver %s only supports cipher %s\n", + g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI], + g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS]); + return -1; + } + if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH) { + SPDK_ERRLOG("Invalid key size %zu for cipher %s, should be %d\n", key->key_size, + g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC], ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH); + return -1; + } + break; + case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS: + switch (driver) { + case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI: + if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH && + key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH) { + SPDK_ERRLOG("Invalid key size %zu for driver %s, cipher %s, supported %d or %d\n", + key->key_size, g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI], + g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS], + ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH, + ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH); + return -1; + } + break; + case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT: + case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB: + if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH) { + SPDK_ERRLOG("Invalid key size %zu, supported %d\n", key->key_size, + ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH); + return -1; + } + break; + default: + SPDK_ERRLOG("Incorrect driver type %d\n", driver); + assert(0); + return -1; + } + if (key->key2_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH) { + SPDK_ERRLOG("Cipher %s requires key2 size %d\n", + g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC], ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH); + return -1; + } + break; + } + + return 0; } -SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto) +static void +accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key) +{ + struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp; + struct accel_dpdk_cryptodev_key_priv *priv = key->priv; + + TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) { + rte_cryptodev_sym_session_free(key_handle->session_encrypt); + rte_cryptodev_sym_session_free(key_handle->session_decrypt); + TAILQ_REMOVE(&priv->dev_keys, key_handle, link); + spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle)); + free(key_handle); + } + + if (priv->xts_key) { + spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 
0, key->key_size + key->key2_size); + } + free(priv->xts_key); + free(priv); +} + +static int +accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key) +{ + struct accel_dpdk_cryptodev_device *device; + struct accel_dpdk_cryptodev_key_priv *priv; + struct accel_dpdk_cryptodev_key_handle *key_handle; + enum accel_dpdk_cryptodev_driver_type driver; + enum accel_dpdk_crypto_dev_cipher_type cipher; + + if (!key->param.cipher) { + SPDK_ERRLOG("Cipher is missing\n"); + return -EINVAL; + } + + if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_CBC) == 0) { + cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC; + } else if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_XTS) == 0) { + cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS; + } else { + SPDK_ERRLOG("Unsupported cipher name %s.\n", key->param.cipher); + return -EINVAL; + } + + driver = g_dpdk_cryptodev_driver; + + if (accel_dpdk_cryptodev_validate_parameters(driver, cipher, key)) { + return -EINVAL; + } + + priv = calloc(1, sizeof(*priv)); + if (!priv) { + SPDK_ERRLOG("Memory allocation failed\n"); + return -ENOMEM; + } + key->priv = priv; + priv->driver = driver; + priv->cipher = cipher; + TAILQ_INIT(&priv->dev_keys); + + if (cipher == ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS) { + /* DPDK expects the keys to be concatenated together. */ + priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char)); + if (!priv->xts_key) { + SPDK_ERRLOG("Memory allocation failed\n"); + accel_dpdk_cryptodev_key_deinit(key); + return -ENOMEM; + } + memcpy(priv->xts_key, key->key, key->key_size); + memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size); + } + + pthread_mutex_lock(&g_device_lock); + TAILQ_FOREACH(device, &g_crypto_devices, link) { + if (device->type != driver) { + continue; + } + key_handle = calloc(1, sizeof(*key_handle)); + if (!key_handle) { + pthread_mutex_unlock(&g_device_lock); + accel_dpdk_cryptodev_key_deinit(key); + return -ENOMEM; + } + key_handle->device = device; + TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link); + if (accel_dpdk_cryptodev_key_handle_configure(key, key_handle)) { + pthread_mutex_unlock(&g_device_lock); + accel_dpdk_cryptodev_key_deinit(key); + return -ENOMEM; + } + if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) { + /* For MLX5_PCI we need to register a key on each device since + * the key is bound to a specific Protection Domain, + * so don't break the loop */ + break; + } + } + pthread_mutex_unlock(&g_device_lock); + + if (TAILQ_EMPTY(&priv->dev_keys)) { + free(priv); + return -ENODEV; + } + + return 0; +} + +static void +accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w) +{ + spdk_json_write_object_begin(w); + spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module"); + spdk_json_write_object_end(w); + + spdk_json_write_object_begin(w); + spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver"); + spdk_json_write_named_object_begin(w, "params"); + spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]); + spdk_json_write_object_end(w); + spdk_json_write_object_end(w); +} + +static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = { + .module_init = accel_dpdk_cryptodev_init, + .module_fini = accel_dpdk_cryptodev_fini, + .write_config_json = accel_dpdk_cryptodev_write_config_json, + .get_ctx_size = accel_dpdk_cryptodev_ctx_size, + .name = "dpdk_cryptodev", + .supports_opcode = accel_dpdk_cryptodev_supports_opcode, + .get_io_channel = accel_dpdk_cryptodev_get_io_channel, + .submit_tasks = 
accel_dpdk_cryptodev_submit_tasks, + .crypto_key_init = accel_dpdk_cryptodev_key_init, + .crypto_key_deinit = accel_dpdk_cryptodev_key_deinit, +}; diff --git a/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.h b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.h new file mode 100644 index 000000000..80ec42e8c --- /dev/null +++ b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) Intel Corporation. + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. + */ + +#ifndef SPDK_ACCEL_DPDK_CRYPTODEV_H +#define SPDK_ACCEL_DPDK_CRYPTODEV_H + +#include "spdk/stdinc.h" + +void accel_dpdk_cryptodev_enable(void); +int accel_dpdk_cryptodev_set_driver(const char *driver_name); +const char *accel_dpdk_cryptodev_get_driver(void); + +#endif /* SPDK_ACCEL_DPDK_CRYPTODEV_H */ diff --git a/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev_rpc.c b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev_rpc.c new file mode 100644 index 000000000..f7d0bbde1 --- /dev/null +++ b/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev_rpc.c @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) Intel Corporation. + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. + */ + +#include "accel_dpdk_cryptodev.h" + +#include "spdk/rpc.h" +#include "spdk/util.h" + +static void +rpc_dpdk_cryptodev_scan_accel_module(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + if (params != NULL) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "No parameters expected"); + return; + } + + accel_dpdk_cryptodev_enable(); + spdk_jsonrpc_send_bool_response(request, true); +} +SPDK_RPC_REGISTER("dpdk_cryptodev_scan_accel_module", rpc_dpdk_cryptodev_scan_accel_module, + SPDK_RPC_STARTUP) + +struct rpc_set_driver { + char *driver_name; +}; + +static const struct spdk_json_object_decoder rpc_set_driver_decoders[] = { + {"driver_name", offsetof(struct rpc_set_driver, driver_name), spdk_json_decode_string}, +}; + +static void +rpc_dpdk_cryptodev_set_driver(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct rpc_set_driver req = {}; + int rc; + + if (spdk_json_decode_object(params, rpc_set_driver_decoders, + SPDK_COUNTOF(rpc_set_driver_decoders), &req)) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_PARSE_ERROR, + "spdk_json_decode_object failed"); + return; + } + + rc = accel_dpdk_cryptodev_set_driver(req.driver_name); + free(req.driver_name); + if (rc) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "incorrect driver name"); + } else { + spdk_jsonrpc_send_bool_response(request, true); + } +} +SPDK_RPC_REGISTER("dpdk_cryptodev_set_driver", rpc_dpdk_cryptodev_set_driver, SPDK_RPC_STARTUP) + +static void +rpc_dpdk_cryptodev_get_driver(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct spdk_json_write_ctx *w; + const char *driver_name; + + if (params) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "No parameters expected"); + return; + } + + driver_name = accel_dpdk_cryptodev_get_driver(); + assert(driver_name); + + w = spdk_jsonrpc_begin_result(request); + spdk_json_write_string(w, driver_name); + spdk_jsonrpc_end_result(request, w); +} +SPDK_RPC_REGISTER("dpdk_cryptodev_get_driver", rpc_dpdk_cryptodev_get_driver, + SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME) diff --git 
a/python/spdk/rpc/__init__.py b/python/spdk/rpc/__init__.py index 7b003ef5e..d64b9eeef 100644 --- a/python/spdk/rpc/__init__.py +++ b/python/spdk/rpc/__init__.py @@ -31,6 +31,7 @@ from . import vmd from . import sock from . import vfio_user from . import iobuf +from . import dpdk_cryptodev from . import client as rpc_client diff --git a/python/spdk/rpc/dpdk_cryptodev.py b/python/spdk/rpc/dpdk_cryptodev.py new file mode 100644 index 000000000..5f5f5dec9 --- /dev/null +++ b/python/spdk/rpc/dpdk_cryptodev.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. + +def dpdk_cryptodev_scan_accel_module(client): + """Enable dpdk_cryptodev accel module. + """ + return client.call('dpdk_cryptodev_scan_accel_module') + + +def dpdk_cryptodev_set_driver(client, driver_name): + """Set the DPDK cryptodev driver. + + Args: + driver_name: The driver, can be one of crypto_aesni_mb, crypto_qat or mlx5_pci + """ + params = {'driver_name': driver_name} + + return client.call('dpdk_cryptodev_set_driver', params) + + +def dpdk_cryptodev_get_driver(client): + """Get the DPDK cryptodev driver. + """ + return client.call('dpdk_cryptodev_get_driver') diff --git a/scripts/rpc.py b/scripts/rpc.py index 8ee6fd116..5de7cba08 100755 --- a/scripts/rpc.py +++ b/scripts/rpc.py @@ -2839,6 +2839,28 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse help='Set config and enable iaa accel module offload.') p.set_defaults(func=iaa_scan_accel_module) + def dpdk_cryptodev_scan_accel_module(args): + rpc.dpdk_cryptodev.dpdk_cryptodev_scan_accel_module(args.client) + + p = subparsers.add_parser('dpdk_cryptodev_scan_accel_module', + help='Enable dpdk_cryptodev accel module offload.') + p.set_defaults(func=dpdk_cryptodev_scan_accel_module) + + def dpdk_cryptodev_set_driver(args): + rpc.dpdk_cryptodev.dpdk_cryptodev_set_driver(args.client, + driver_name=args.driver_name) + + p = subparsers.add_parser('dpdk_cryptodev_set_driver', + help='Set the DPDK cryptodev driver.') + p.add_argument('-d', '--driver-name', help='The driver, can be one of crypto_aesni_mb, crypto_qat or mlx5_pci', type=str) + p.set_defaults(func=dpdk_cryptodev_set_driver) + + def dpdk_cryptodev_get_driver(args): + print_dict(rpc.dpdk_cryptodev.dpdk_cryptodev_get_driver(args.client)) + + p = subparsers.add_parser('dpdk_cryptodev_get_driver', help='Get the DPDK cryptodev driver') + p.set_defaults(func=dpdk_cryptodev_get_driver) + # opal def bdev_nvme_opal_init(args): rpc.nvme.bdev_nvme_opal_init(args.client, diff --git a/test/common/skipped_build_files.txt b/test/common/skipped_build_files.txt index 3ea3d8ad6..08842fad5 100644 --- a/test/common/skipped_build_files.txt +++ b/test/common/skipped_build_files.txt @@ -56,7 +56,3 @@ module/bdev/daos/bdev_daos_rpc # Not configured to test xNVMe bdev module/bdev/xnvme/bdev_xnvme module/bdev/xnvme/bdev_xnvme_rpc - -# Temporary added, will be remove in the next patch -module/accel/dpdk_cryptodev/accel_dpdk_cryptodev -test/unit/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut diff --git a/test/unit/lib/accel/Makefile b/test/unit/lib/accel/Makefile index 5e259e42e..6b58b057f 100644 --- a/test/unit/lib/accel/Makefile +++ b/test/unit/lib/accel/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (C) 2015 Intel Corporation. +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. # All rights reserved. # @@ -7,6 +8,7 @@ SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..) 
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
 
 DIRS-y = accel.c
+DIRS-$(CONFIG_CRYPTO) += dpdk_cryptodev.c
 
 .PHONY: all clean $(DIRS-y)
 
diff --git a/test/unit/lib/accel/dpdk_cryptodev.c/Makefile b/test/unit/lib/accel/dpdk_cryptodev.c/Makefile
new file mode 100644
index 000000000..2e59104f7
--- /dev/null
+++ b/test/unit/lib/accel/dpdk_cryptodev.c/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = accel_dpdk_cryptodev_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/test/unit/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut.c b/test/unit/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut.c
index aca77e64e..9509cbc7f 100644
--- a/test/unit/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut.c
+++ b/test/unit/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut.c
@@ -1,18 +1,18 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2018 Intel Corporation.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
  * All rights reserved.
  */
 
 #include "spdk_cunit.h"
-#include "common/lib/test_env.c"
 #include "spdk_internal/mock.h"
 #include "thread/thread_internal.h"
 #include "unit/lib/json_mock.c"
+#include "common/lib/ut_multithread.c"
 
 #include <rte_crypto.h>
 
 #include <rte_cryptodev.h>
-#include <rte_version.h>
 
 #define MAX_TEST_BLOCKS 8192
 struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
@@ -199,14 +199,9 @@ mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
 }
 
 #define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
-#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
-static inline int
-mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
-#else
 static inline int
 mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
 		struct rte_cryptodev_sym_session *sess)
-#endif
 {
 	return ut_rte_crypto_op_attach_sym_session;
 }
@@ -218,34 +213,12 @@ mock_rte_lcore_count(void)
 	return 1;
 }
 
-#include "bdev/crypto/vbdev_crypto.c"
+#include "accel/dpdk_cryptodev/accel_dpdk_cryptodev.c"
 
-/* SPDK stubs */
-DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
-		struct spdk_bdev_io_wait_entry *entry), 0);
-DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
-DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
-DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
-DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
-		enum spdk_bdev_io_type io_type), 0);
-DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
-DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
-DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
-DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
-DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
-DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
-				     void *cb_arg));
-DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
-		struct spdk_bdev_module *module,
-		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
-DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
-				      spdk_bdev_event_cb_t event_cb,
-				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
-DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct 
spdk_bdev_desc *desc), NULL); -DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, - struct spdk_bdev_module *module), 0); -DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); -DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0); +/* accel stubs */ +DEFINE_STUB_V(spdk_accel_task_complete, (struct spdk_accel_task *task, int status)); +DEFINE_STUB_V(spdk_accel_module_finish, (void)); +DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module)); /* DPDK stubs */ #define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1]) @@ -260,33 +233,25 @@ DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0); DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id)); DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0); -DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0); -DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0); - -#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) -DEFINE_STUB(rte_cryptodev_sym_session_create, void *, - (uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), (void *)1); -DEFINE_STUB(rte_cryptodev_sym_session_free, int, (uint8_t dev_id, void *sess), 0); -#else DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *, - (struct rte_mempool *mempool), (void *)1); + (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1); DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id, struct rte_cryptodev_sym_session *sess, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0); +DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0); DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0); -#endif +DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0); struct rte_cryptodev *rte_cryptodevs; /* global vars and setup/cleanup functions used for all test functions */ -struct spdk_bdev_io *g_bdev_io; -struct crypto_bdev_io *g_io_ctx; -struct crypto_io_channel *g_crypto_ch; struct spdk_io_channel *g_io_ch; -struct vbdev_dev g_device; -struct vbdev_crypto g_crypto_bdev; -struct vbdev_crypto_opts g_crypto_bdev_opts; -struct device_qp g_dev_qp; +struct accel_dpdk_cryptodev_io_channel *g_crypto_ch; +struct accel_dpdk_cryptodev_device g_aesni_crypto_dev; +struct accel_dpdk_cryptodev_qp g_aesni_qp; +struct accel_dpdk_cryptodev_key_handle g_key_handle; +struct accel_dpdk_cryptodev_key_priv g_key_priv; +struct spdk_accel_crypto_key g_key; void rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) @@ -309,83 +274,6 @@ rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) return (unsigned int)dev_id; } -void -spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb) -{ - cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF); -} - -void -spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) -{ - cb(g_io_ch, g_bdev_io, true); -} - -/* Mock these functions to call the callback and then return the value we require */ -int ut_spdk_bdev_readv_blocks = 0; -bool ut_spdk_bdev_readv_blocks_mocked = false; -int -spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, - struct iovec *iov, int iovcnt, - uint64_t offset_blocks, uint64_t num_blocks, - spdk_bdev_io_completion_cb cb, void *cb_arg) -{ - cb(g_bdev_io, 
!ut_spdk_bdev_readv_blocks, cb_arg); - return ut_spdk_bdev_readv_blocks; -} - -int ut_spdk_bdev_writev_blocks = 0; -bool ut_spdk_bdev_writev_blocks_mocked = false; -int -spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, - struct iovec *iov, int iovcnt, - uint64_t offset_blocks, uint64_t num_blocks, - spdk_bdev_io_completion_cb cb, void *cb_arg) -{ - cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg); - return ut_spdk_bdev_writev_blocks; -} - -int ut_spdk_bdev_unmap_blocks = 0; -bool ut_spdk_bdev_unmap_blocks_mocked = false; -int -spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, - uint64_t offset_blocks, uint64_t num_blocks, - spdk_bdev_io_completion_cb cb, void *cb_arg) -{ - cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg); - return ut_spdk_bdev_unmap_blocks; -} - -int ut_spdk_bdev_flush_blocks = 0; -bool ut_spdk_bdev_flush_blocks_mocked = false; -int -spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, - uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, - void *cb_arg) -{ - cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg); - return ut_spdk_bdev_flush_blocks; -} - -int ut_spdk_bdev_reset = 0; -bool ut_spdk_bdev_reset_mocked = false; -int -spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, - spdk_bdev_io_completion_cb cb, void *cb_arg) -{ - cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg); - return ut_spdk_bdev_reset; -} - -bool g_completion_called = false; -void -spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) -{ - bdev_io->internal.status = status; - g_completion_called = true; -} - /* Global setup for all tests that share a bunch of preparation... */ static int test_setup(void) @@ -393,38 +281,42 @@ test_setup(void) int i, rc; /* Prepare essential variables for test routines */ - g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io)); - g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128); - g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev; - g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel)); - g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch); - g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx; - memset(&g_device, 0, sizeof(struct vbdev_dev)); - memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto)); - memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts)); - g_dev_qp.device = &g_device; - g_io_ctx->crypto_ch = g_crypto_ch; - g_io_ctx->crypto_bdev = &g_crypto_bdev; - g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS; - g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts; - g_crypto_ch->device_qp = &g_dev_qp; - TAILQ_INIT(&g_crypto_ch->pending_cry_ios); + g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel)); + g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch); TAILQ_INIT(&g_crypto_ch->queued_cry_ops); + g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; + g_aesni_crypto_dev.qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS; + TAILQ_INIT(&g_aesni_crypto_dev.qpairs); + + g_aesni_qp.device = &g_aesni_crypto_dev; + g_crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = &g_aesni_qp; + + g_key_handle.device = &g_aesni_crypto_dev; + g_key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; + g_key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC; + TAILQ_INIT(&g_key_priv.dev_keys); + 
TAILQ_INSERT_TAIL(&g_key_priv.dev_keys, &g_key_handle, link); + g_key.priv = &g_key_priv; + g_key.module_if = &g_accel_dpdk_cryptodev_module; + + /* Allocate a real mbuf pool so we can test error paths */ - g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS, + g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, 0, 0, SPDK_ENV_SOCKET_ID_ANY); /* Instead of allocating real rte mempools for these, it's easier and provides the * same coverage just calloc them here. */ for (i = 0; i < MAX_TEST_BLOCKS; i++) { - size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH; + size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH + + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH; rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size); if (rc != 0) { assert(false); } - memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH); + memset(g_test_crypto_ops[i], 0, + ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH); } g_mbuf_offset = DPDK_DYNFIELD_OFFSET; @@ -458,8 +350,6 @@ test_cleanup(void) for (i = 0; i < MAX_TEST_BLOCKS; i++) { free(g_test_crypto_ops[i]); } - free(g_bdev_io->u.bdev.iovs); - free(g_bdev_io); free(g_io_ch); return 0; } @@ -467,181 +357,351 @@ test_cleanup(void) static void test_error_paths(void) { - /* Single element block size write, just to test error paths - * in vbdev_crypto_submit_request(). - */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = 1; - g_bdev_io->u.bdev.iovs[0].iov_len = 512; - g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF; - g_crypto_bdev.crypto_bdev.blocklen = 512; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + /* Single element block size encrypt, just to test error paths + * in accel_dpdk_cryptodev_submit_tasks() */ + struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }; + struct iovec dst_iov = src_iov; + struct accel_dpdk_cryptodev_task task = {}; + struct accel_dpdk_cryptodev_key_priv key_priv = {}; + struct spdk_accel_crypto_key key = {}; + int rc; + + task.base.op_code = ACCEL_OPC_ENCRYPT; + task.base.s.iovcnt = 1; + task.base.s.iovs = &src_iov; + task.base.d.iovcnt = 1; + task.base.d.iovs = &dst_iov; + task.base.nbytes = 512; + task.base.block_size = 512; + task.base.crypto_key = &g_key; + task.base.iv = 1; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; - /* test failure of spdk_mempool_get_bulk(), will result in success because it - * will get queued. 
- */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - MOCK_SET(spdk_mempool_get, NULL); - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); + /* case 1 - no crypto key */ + task.base.crypto_key = NULL; + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EINVAL); + task.base.crypto_key = &g_key; - /* same thing but switch to reads to test error path in _crypto_complete_io() */ - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link); - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); - /* Now with the read_blocks failing */ - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - MOCK_SET(spdk_bdev_readv_blocks, -1); - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); - MOCK_SET(spdk_bdev_readv_blocks, 0); + /* case 2 - crypto key with wrong module_if */ + key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB; + key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC; + TAILQ_INIT(&key_priv.dev_keys); + key.priv = &key_priv; + key.module_if = (struct spdk_accel_module_if *) 0x1; + task.base.crypto_key = &key; + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EINVAL); + key.module_if = &g_accel_dpdk_cryptodev_module; + + /* case 3 - nbytes too big */ + task.base.nbytes = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO + 512; + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -E2BIG); + task.base.nbytes = 512; + + /* case 4 - no key handle in the channel */ + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EINVAL); + task.base.crypto_key = &g_key; + + /* case 5 - invalid op */ + task.base.op_code = ACCEL_OPC_COMPARE; + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EINVAL); + task.base.op_code = ACCEL_OPC_ENCRYPT; + + /* case 6 - no entries in g_mbuf_mp */ + MOCK_SET(spdk_mempool_get, NULL); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -ENOMEM); MOCK_CLEAR(spdk_mempool_get); - /* test failure of rte_crypto_op_bulk_alloc() */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - ut_rte_crypto_op_bulk_alloc = 0; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); - ut_rte_crypto_op_bulk_alloc = 1; - - /* test failure of rte_crypto_op_attach_sym_session() */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - ut_rte_crypto_op_attach_sym_session = -1; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); - ut_rte_crypto_op_attach_sym_session = 0; + /* case 7 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */ + MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EFAULT); + MOCK_CLEAR(spdk_vtophys); } static void -test_simple_write(void) +test_simple_encrypt(void) { - /* Single element block size write */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = 1; - g_bdev_io->u.bdev.offset_blocks = 0; - 
g_bdev_io->u.bdev.iovs[0].iov_len = 512; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write; - g_crypto_bdev.crypto_bdev.blocklen = 512; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }}; + struct iovec dst_iov = src_iov[0]; + struct accel_dpdk_cryptodev_task task = {}; + struct rte_mbuf *mbuf; + int rc, i; + + task.base.op_code = ACCEL_OPC_ENCRYPT; + task.base.s.iovcnt = 1; + task.base.s.iovs = src_iov; + task.base.d.iovcnt = 1; + task.base.d.iovs = &dst_iov; + task.base.nbytes = 512; + task.base.block_size = 512; + task.base.crypto_key = &g_key; + task.base.iv = 1; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1); - CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512); - CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL); - CU_ASSERT(g_io_ctx->aux_offset_blocks == 0); - CU_ASSERT(g_io_ctx->aux_num_blocks == 1); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512); + /* Inplace encryption */ + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len); CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, - uint64_t *) == (uint64_t)g_bdev_io); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512); + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL); + + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); + + /* out-of-place encryption */ + task.cryop_cnt_remaining = 0; + dst_iov.iov_base = (void *)0xFEEDBEEF; + + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len); + + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst); + + /* out-of-place encryption, fragmented payload */ + task.base.s.iovcnt = 4; + for (i = 0; i < 4; i++) { + src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128; + src_iov[i].iov_len = 128; + } + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + mbuf = g_test_crypto_ops[0]->sym->m_src; + CU_ASSERT(mbuf != NULL); + CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base); + 
CU_ASSERT(mbuf->data_len == src_iov[0].iov_len); + for (i = 1; i < 4; i++) { + mbuf = mbuf->next; + SPDK_CU_ASSERT_FATAL(mbuf != NULL); + CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base); + CU_ASSERT(mbuf->data_len == src_iov[i].iov_len); + rte_pktmbuf_free(mbuf); + + } + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len); rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst); } static void -test_simple_read(void) +test_simple_decrypt(void) { - /* Single element block size read */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = 1; - g_bdev_io->u.bdev.iovs[0].iov_len = 512; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read; - g_crypto_bdev.crypto_bdev.blocklen = 512; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }}; + struct iovec dst_iov = src_iov[0]; + struct accel_dpdk_cryptodev_task task = {}; + struct rte_mbuf *mbuf; + int rc, i; + + task.base.op_code = ACCEL_OPC_DECRYPT; + task.base.s.iovcnt = 1; + task.base.s.iovs = src_iov; + task.base.d.iovcnt = 1; + task.base.d.iovs = &dst_iov; + task.base.nbytes = 512; + task.base.block_size = 512; + task.base.crypto_key = &g_key; + task.base.iv = 1; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read); - CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512); + /* Inplace decryption */ + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len); CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, - uint64_t *) == (uint64_t)g_bdev_io); + uint64_t *) == (uint64_t)&task); CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL); rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); + + /* out-of-place decryption */ + task.cryop_cnt_remaining = 0; + dst_iov.iov_base = (void *)0xFEEDBEEF; + + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + 
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len); + + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst); + + /* out-of-place decryption, fragmented payload */ + task.base.s.iovcnt = 4; + for (i = 0; i < 4; i++) { + src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128; + src_iov[i].iov_len = 128; + } + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 1); + mbuf = g_test_crypto_ops[0]->sym->m_src; + CU_ASSERT(mbuf != NULL); + CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base); + CU_ASSERT(mbuf->data_len == src_iov[0].iov_len); + for (i = 1; i < 4; i++) { + mbuf = mbuf->next; + SPDK_CU_ASSERT_FATAL(mbuf != NULL); + CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base); + CU_ASSERT(mbuf->data_len == src_iov[i].iov_len); + rte_pktmbuf_free(mbuf); + + } + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512); + CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base); + CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len); + + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); + rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst); } static void -test_large_rw(void) +test_large_enc_dec(void) { - unsigned block_len = 512; - unsigned num_blocks = CRYPTO_MAX_IO / block_len; - unsigned io_len = block_len * num_blocks; - unsigned i; + struct accel_dpdk_cryptodev_task task = {}; + uint32_t block_len = 512; + uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / block_len; + struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO }; + struct iovec dst_iov = src_iov; + uint32_t i; + int rc; - /* Multi block size read, multi-element */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = num_blocks; - g_bdev_io->u.bdev.iovs[0].iov_len = io_len; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw; - g_crypto_bdev.crypto_bdev.blocklen = block_len; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + task.base.op_code = ACCEL_OPC_DECRYPT; + task.base.s.iovcnt = 1; + task.base.s.iovs = &src_iov; + task.base.d.iovcnt = 1; + task.base.d.iovs = &dst_iov; + task.base.nbytes = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO; + task.base.block_size = 512; + task.base.crypto_key = &g_key; + task.base.iv = 1; + + /* Multi block size decryption, multi-element, inplace */ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); for (i = 0; i < num_blocks; i++) { - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len)); CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); 
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, - uint64_t *) == (uint64_t)g_bdev_io); + uint64_t *) == (uint64_t)&task); CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); } - /* Multi block size write, multi-element */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = num_blocks; - g_bdev_io->u.bdev.iovs[0].iov_len = io_len; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw; - g_crypto_bdev.crypto_bdev.blocklen = block_len; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; + /* Multi block size decryption, multi-element, out-of-place */ + task.cryop_cnt_remaining = 0; + dst_iov.iov_base = (void *)0xFEEDBEEF; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks); - + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); for (i = 0; i < num_blocks; i++) { - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len)); CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, - uint64_t *) == (uint64_t)g_bdev_io); - CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len); - CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL); - CU_ASSERT(g_io_ctx->aux_offset_blocks == 0); - CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL); + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len)); CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL); + rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); + rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst); + } + + /* Multi block size encryption, multi-element, inplace */ + dst_iov = src_iov; + task.base.op_code = ACCEL_OPC_ENCRYPT; + task.cryop_cnt_remaining = 0; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); + + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); + rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); + } + + /* Multi block size encryption, multi-element, out-of-place 
*/ + task.cryop_cnt_remaining = 0; + dst_iov.iov_base = (void *)0xFEEDBEEF; + g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; + + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); + for (i = 0; i < num_blocks; i++) { + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); + CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL); rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst); } @@ -650,36 +710,40 @@ test_large_rw(void) static void test_dev_full(void) { - struct vbdev_crypto_op *queued_op; + struct accel_dpdk_cryptodev_task task = {}; + struct accel_dpdk_cryptodev_queued_op *queued_op; struct rte_crypto_sym_op *sym_op; - struct crypto_bdev_io *io_ctx; + struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 }; + struct iovec dst_iov = src_iov; + int rc; - /* Two element block size read */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 1; - g_bdev_io->u.bdev.num_blocks = 2; - g_bdev_io->u.bdev.iovs[0].iov_len = 512; - g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF; - g_bdev_io->u.bdev.iovs[1].iov_len = 512; - g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF; - g_crypto_bdev.crypto_bdev.blocklen = 512; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; + task.base.op_code = ACCEL_OPC_DECRYPT; + task.base.s.iovcnt = 1; + task.base.s.iovs = &src_iov; + task.base.d.iovcnt = 1; + task.base.d.iovs = &dst_iov; + task.base.nbytes = 1024; + task.base.block_size = 512; + task.base.crypto_key = &g_key; + task.base.iv = 1; + + /* Two element block size decryption */ g_enqueue_mock = g_dequeue_mock = 1; ut_rte_crypto_op_bulk_alloc = 2; g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == 2); sym_op = g_test_crypto_ops[0]->sym; - CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF); + CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base); CU_ASSERT(sym_op->m_src->data_len == 512); CU_ASSERT(sym_op->m_src->next == NULL); CU_ASSERT(sym_op->cipher.data.length == 512); CU_ASSERT(sym_op->cipher.data.offset == 0); - CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io); + CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task); CU_ASSERT(sym_op->m_dst == NULL); /* make sure one got queued and confirm its values */ @@ -687,14 +751,14 @@ test_dev_full(void) queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops); sym_op = queued_op->crypto_op->sym; 
TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link); - CU_ASSERT(queued_op->bdev_io == g_bdev_io); + CU_ASSERT(queued_op->task == &task); CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]); - CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF); + CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF + 512); CU_ASSERT(sym_op->m_src->data_len == 512); CU_ASSERT(sym_op->m_src->next == NULL); CU_ASSERT(sym_op->cipher.data.length == 512); CU_ASSERT(sym_op->cipher.data.offset == 0); - CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io); + CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task); CU_ASSERT(sym_op->m_dst == NULL); CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true); rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src); @@ -703,128 +767,92 @@ test_dev_full(void) /* Non-busy reason for enqueue failure, all were rejected. */ g_enqueue_mock = 0; g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx; - CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == -EINVAL); } static void test_crazy_rw(void) { - unsigned block_len = 512; - int num_blocks = 4; - int i; + struct accel_dpdk_cryptodev_task task = {}; + struct iovec src_iov[4] = { + [0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }, + [1] = {.iov_base = (void *)0xDEADBEEF + 512, .iov_len = 1024 }, + [2] = {.iov_base = (void *)0xDEADBEEF + 512 + 1024, .iov_len = 512 } + }; + struct iovec *dst_iov = src_iov; + uint32_t block_len = 512, num_blocks = 4, i; + int rc; + + task.base.op_code = ACCEL_OPC_DECRYPT; + task.base.s.iovcnt = 3; + task.base.s.iovs = src_iov; + task.base.d.iovcnt = 3; + task.base.d.iovs = dst_iov; + task.base.block_size = 512; + task.base.nbytes = num_blocks * task.base.block_size; + task.base.crypto_key = &g_key; + task.base.iv = 1; /* Multi block size read, single element, strange IOV makeup */ - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 3; - g_bdev_io->u.bdev.num_blocks = num_blocks; - g_bdev_io->u.bdev.iovs[0].iov_len = 512; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw; - g_bdev_io->u.bdev.iovs[1].iov_len = 1024; - g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512; - g_bdev_io->u.bdev.iovs[2].iov_len = 512; - g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024; - - g_crypto_bdev.crypto_bdev.blocklen = block_len; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); for (i = 0; i < num_blocks; i++) { - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len)); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, - uint64_t *) == 
(uint64_t)g_bdev_io); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src); + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); } /* Multi block size write, single element strange IOV makeup */ num_blocks = 8; - g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; - g_bdev_io->u.bdev.iovcnt = 4; - g_bdev_io->u.bdev.num_blocks = num_blocks; - g_bdev_io->u.bdev.iovs[0].iov_len = 2048; - g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw; - g_bdev_io->u.bdev.iovs[1].iov_len = 512; - g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048; - g_bdev_io->u.bdev.iovs[2].iov_len = 512; - g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512; - g_bdev_io->u.bdev.iovs[3].iov_len = 1024; - g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512; + task.base.op_code = ACCEL_OPC_ENCRYPT; + task.cryop_cnt_remaining = 0; + task.base.nbytes = task.base.block_size * num_blocks; + task.base.s.iovcnt = 4; + task.base.d.iovcnt = 4; + task.base.s.iovs[0].iov_len = 2048; + task.base.s.iovs[0].iov_base = (void *)0xDEADBEEF; + task.base.s.iovs[1].iov_len = 512; + task.base.s.iovs[1].iov_base = (void *)0xDEADBEEF + 2048; + task.base.s.iovs[2].iov_len = 512; + task.base.s.iovs[2].iov_base = (void *)0xDEADBEEF + 2048 + 512; + task.base.s.iovs[3].iov_len = 1024; + task.base.s.iovs[3].iov_base = (void *)0xDEADBEEF + 2048 + 512 + 512; - g_crypto_bdev.crypto_bdev.blocklen = block_len; - g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE; g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks; - vbdev_crypto_submit_request(g_io_ch, g_bdev_io); - CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); - CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks); + rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base); + CU_ASSERT(rc == 0); + CU_ASSERT(task.cryop_cnt_remaining == num_blocks); for (i = 0; i < num_blocks; i++) { - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len)); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len); CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0); CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset, - uint64_t *) == (uint64_t)g_bdev_io); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src); - CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst); + uint64_t *) == (uint64_t)&task); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len)); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len); + CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL); rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src); - rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst); } } -static void -test_passthru(void) -{ - /* Make sure these follow our completion callback, test success & fail. 
 */
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
-	MOCK_SET(spdk_bdev_unmap_blocks, 0);
-	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
-	MOCK_SET(spdk_bdev_unmap_blocks, -1);
-	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	MOCK_CLEAR(spdk_bdev_unmap_blocks);
-
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
-	MOCK_SET(spdk_bdev_flush_blocks, 0);
-	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
-	MOCK_SET(spdk_bdev_flush_blocks, -1);
-	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	MOCK_CLEAR(spdk_bdev_flush_blocks);
-
-	/* We should never get a WZ command, we report that we don't support it. */
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
-	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-}
-
-static void
-test_reset(void)
-{
-	/* TODO: There are a few different ways to do this given that
-	 * the code uses spdk_for_each_channel() to implement reset
-	 * handling. Submitting w/o UT for this function for now and
-	 * will follow up with something shortly.
-	 */
-}
-
 static void
 init_cleanup(void)
 {
+	struct accel_dpdk_cryptodev_device *dev, *tmp;
+
 	if (g_crypto_op_mp) {
 		rte_mempool_free(g_crypto_op_mp);
 		g_crypto_op_mp = NULL;
@@ -842,6 +870,13 @@ init_cleanup(void)
 		rte_mempool_free(g_session_mp_priv);
 		g_session_mp_priv = NULL;
 	}
+
+	TAILQ_FOREACH_SAFE(dev, &g_crypto_devices, link, tmp) {
+		TAILQ_REMOVE(&g_crypto_devices, dev, link);
+		accel_dpdk_cryptodev_release(dev);
+	}
+
+	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, NULL);
 }
 
 static void
@@ -852,6 +887,10 @@ test_initdrivers(void)
 	static struct rte_mempool *orig_session_mp;
 	static struct rte_mempool *orig_session_mp_priv;
 
+	/* accel_dpdk_cryptodev_init calls spdk_io_device_register, we need to have a thread */
+	allocate_threads(1);
+	set_thread(0);
+
 	/* These tests will alloc and free our g_mbuf_mp
 	 * so save that off here and restore it after each test is over.
 	 */
@@ -865,7 +904,7 @@ test_initdrivers(void)
 
 	/* No drivers available, not an error though */
 	MOCK_SET(rte_cryptodev_count, 0);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == 0);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -874,7 +913,7 @@ test_initdrivers(void)
 	/* Can't create session pool. */
 	MOCK_SET(rte_cryptodev_count, 2);
 	MOCK_SET(spdk_mempool_create, NULL);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == -ENOMEM);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -883,7 +922,7 @@
 
 	/* Can't create op pool. */
 	MOCK_SET(rte_crypto_op_pool_create, NULL);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == -ENOMEM);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -892,7 +931,7 @@
 
 	/* Check resources are not sufficient */
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == -EINVAL);
 
 	/* Test crypto dev configure failure. */
@@ -900,7 +939,7 @@
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
 	MOCK_SET(rte_cryptodev_configure, -1);
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	MOCK_SET(rte_cryptodev_configure, 0);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -910,7 +949,7 @@
 	/* Test failure of qp setup. */
 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == -EINVAL);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -920,7 +959,7 @@
 	/* Test failure of dev start. */
 	MOCK_SET(rte_cryptodev_start, -1);
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == -EINVAL);
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
@@ -930,7 +969,7 @@
 	/* Test bogus PMD */
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(g_mbuf_mp == NULL);
 	CU_ASSERT(g_session_mp == NULL);
 	CU_ASSERT(rc == -EINVAL);
@@ -938,7 +977,7 @@
 	/* Test happy path QAT. */
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(g_mbuf_mp != NULL);
 	CU_ASSERT(g_session_mp != NULL);
 	init_cleanup();
@@ -947,7 +986,7 @@
 	/* Test happy path AESNI. */
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
 	init_cleanup();
 	CU_ASSERT(rc == 0);
@@ -955,7 +994,7 @@
 	/* Test happy path MLX5. */
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
 	init_cleanup();
 	CU_ASSERT(rc == 0);
@@ -967,13 +1006,11 @@
 	MOCK_SET(rte_vdev_init, -1);
 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
-	rc = vbdev_crypto_init_crypto_drivers();
+	rc = accel_dpdk_cryptodev_init();
 	CU_ASSERT(rc == 0);
 	CU_ASSERT(g_mbuf_mp != NULL);
 	CU_ASSERT(g_session_mp != NULL);
-#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
 	CU_ASSERT(g_session_mp_priv != NULL);
-#endif
 	init_cleanup();
 	MOCK_SET(rte_vdev_init, 0);
 	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);
@@ -982,83 +1019,59 @@
 	g_mbuf_mp = orig_mbuf_mp;
 	g_session_mp = orig_session_mp;
 	g_session_mp_priv = orig_session_mp_priv;
+	free_threads();
 }
 
 static void
-test_crypto_op_complete(void)
+test_supported_opcodes(void)
 {
-	/* Make sure completion code respects failure. */
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
-	g_completion_called = false;
-	_crypto_operation_complete(g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	CU_ASSERT(g_completion_called == true);
-
-	/* Test read completion. */
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
-	g_completion_called = false;
-	_crypto_operation_complete(g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
-	CU_ASSERT(g_completion_called == true);
-
-	/* Test write completion success. */
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
-	g_completion_called = false;
-	MOCK_SET(spdk_bdev_writev_blocks, 0);
-	_crypto_operation_complete(g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
-	CU_ASSERT(g_completion_called == true);
-
-	/* Test write completion failed. */
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
-	g_completion_called = false;
-	MOCK_SET(spdk_bdev_writev_blocks, -1);
-	_crypto_operation_complete(g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	CU_ASSERT(g_completion_called == true);
-
-	/* Test bogus type for this completion. */
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
-	g_completion_called = false;
-	_crypto_operation_complete(g_bdev_io);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	CU_ASSERT(g_completion_called == true);
-}
-
-static void
-test_supported_io(void)
-{
-	void *ctx = NULL;
 	bool rc = true;
+	enum accel_opcode opc;
 
-	/* Make sure we always report false to WZ, we need the bdev layer to
-	 * send real 0's so we can encrypt/decrypt them.
-	 */
-	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
-	CU_ASSERT(rc == false);
+	for (opc = 0; opc < ACCEL_OPC_LAST; opc++) {
+		rc = accel_dpdk_cryptodev_supports_opcode(opc);
+		switch (opc) {
+		case ACCEL_OPC_ENCRYPT:
+		case ACCEL_OPC_DECRYPT:
+			CU_ASSERT(rc == true);
+			break;
+		default:
+			CU_ASSERT(rc == false);
+		}
+	}
 }
 
 static void
 test_poller(void)
 {
+	struct accel_dpdk_cryptodev_task task = {};
+	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
+	struct iovec dst_iov = src_iov;
 	int rc;
+
+	task.base.op_code = ACCEL_OPC_DECRYPT;
+	task.base.s.iovcnt = 1;
+	task.base.s.iovs = &src_iov;
+	task.base.d.iovcnt = 1;
+	task.base.d.iovs = &dst_iov;
+	task.base.nbytes = 1024;
+	task.base.block_size = 512;
+	task.base.crypto_key = &g_key;
+	task.base.iv = 1;
+
 	struct rte_mbuf *src_mbufs[2];
-	struct vbdev_crypto_op *op_to_resubmit;
+	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
 
 	/* test regular 1 op to dequeue and complete */
 	g_dequeue_mock = g_enqueue_mock = 1;
 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
-			   uint64_t *) = (uintptr_t)g_bdev_io;
+			   uint64_t *) = (uintptr_t)&task;
 	g_test_crypto_ops[0]->sym->m_dst = NULL;
-	g_io_ctx->cryop_cnt_remaining = 1;
-	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
-	rc = crypto_dev_poller(g_crypto_ch);
+	task.cryop_cnt_remaining = 1;
+	task.base.op_code = ACCEL_OPC_DECRYPT;
+	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
 	CU_ASSERT(rc == 1);
 
 	/* We have nothing dequeued but have some to resubmit */
@@ -1067,132 +1080,129 @@ test_poller(void)
 	/* add an op to the queued list. */
 	g_resubmit_test = true;
-	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
+	op_to_resubmit = (struct accel_dpdk_cryptodev_queued_op *)((uint8_t *)g_test_crypto_ops[0] +
+			 ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
 	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
-	op_to_resubmit->bdev_io = g_bdev_io;
+	op_to_resubmit->task = &task;
+	op_to_resubmit->qp = &g_aesni_qp;
 	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops, op_to_resubmit, link);
 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
-	rc = crypto_dev_poller(g_crypto_ch);
+	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
 	g_resubmit_test = false;
-	CU_ASSERT(rc == 0);
+	CU_ASSERT(rc == 1);
 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
 
 	/* 2 to dequeue but 2nd one failed */
 	g_dequeue_mock = g_enqueue_mock = 2;
-	g_io_ctx->cryop_cnt_remaining = 2;
+	task.cryop_cnt_remaining = 2;
 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
-			   uint64_t *) = (uint64_t)g_bdev_io;
+			   uint64_t *) = (uint64_t)&task;
 	g_test_crypto_ops[0]->sym->m_dst = NULL;
 	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
-			   uint64_t *) = (uint64_t)g_bdev_io;
+			   uint64_t *) = (uint64_t)&task;
 	g_test_crypto_ops[1]->sym->m_dst = NULL;
 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-	rc = crypto_dev_poller(g_crypto_ch);
-	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-	CU_ASSERT(rc == 2);
+	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
+	CU_ASSERT(task.is_failed == true);
+	CU_ASSERT(rc == 1);
 }
 
-/* Helper function for test_assign_device_qp() */
+/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
 static void
-_clear_device_qp_lists(void)
+_check_expected_values(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
+		       uint8_t expected_qat_index,
+		       uint8_t next_qat_index)
 {
-	struct device_qp *device_qp = NULL;
+	uint32_t num_qpairs;
 
-	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
-		device_qp = TAILQ_FIRST(&g_device_qp_qat);
-		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
-		free(device_qp);
+	memset(crypto_ch->device_qp, 0, sizeof(crypto_ch->device_qp));
 
-	}
-	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
-	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
-		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
-		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
-		free(device_qp);
-	}
-	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
-	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
-		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
-		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
-		free(device_qp);
-	}
-	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
-}
+	num_qpairs = accel_dpdk_cryptodev_assign_device_qps(crypto_ch);
+	CU_ASSERT(num_qpairs == 3);
 
-/* Helper function for test_assign_device_qp() */
-static void
-_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
-		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
-		       uint8_t current_index)
-{
-	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
-	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
-	CU_ASSERT(g_next_qat_index == current_index);
+	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] != NULL);
+	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->index == expected_qat_index);
+	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->in_use == true);
+	CU_ASSERT(g_next_qat_index == next_qat_index);
+	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] != NULL);
+	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]->in_use == true);
+	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] != NULL);
+	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->in_use == true);
 }
 
 static void
 test_assign_device_qp(void)
 {
-	struct device_qp *device_qp = NULL;
+	struct accel_dpdk_cryptodev_device qat_dev = {
+		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
+		.qpairs = TAILQ_HEAD_INITIALIZER(qat_dev.qpairs)
+	};
+	struct accel_dpdk_cryptodev_device aesni_dev = {
+		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB,
+		.qpairs = TAILQ_HEAD_INITIALIZER(aesni_dev.qpairs)
+	};
+	struct accel_dpdk_cryptodev_device mlx5_dev = {
+		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
+		.qpairs = TAILQ_HEAD_INITIALIZER(mlx5_dev.qpairs)
+	};
+	struct accel_dpdk_cryptodev_qp *qat_qps;
+	struct accel_dpdk_cryptodev_qp aesni_qps[4] = {};
+	struct accel_dpdk_cryptodev_qp mlx5_qps[4] = {};
+	struct accel_dpdk_cryptodev_io_channel io_ch = {};
+	TAILQ_HEAD(, accel_dpdk_cryptodev_device) devs_tmp = TAILQ_HEAD_INITIALIZER(devs_tmp);
 	int i;
 
-	/* start with a known state, clear the device/qp lists */
-	_clear_device_qp_lists();
+	g_qat_total_qp = 96;
+	qat_qps = calloc(g_qat_total_qp, sizeof(*qat_qps));
+	SPDK_CU_ASSERT_FATAL(qat_qps != NULL);
 
-	/* make sure that one AESNI_MB qp is found */
-	device_qp = calloc(1, sizeof(struct device_qp));
-	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
-	g_crypto_ch->device_qp = NULL;
-	g_crypto_bdev.opts->drv_name = AESNI_MB;
-	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
-	CU_ASSERT(g_crypto_ch->device_qp != NULL);
+	for (i = 0; i < 4; i++) {
+		aesni_qps[i].index = i;
+		aesni_qps[i].device = &aesni_dev;
+		TAILQ_INSERT_TAIL(&aesni_dev.qpairs, &aesni_qps[i], link);
+
+		mlx5_qps[i].index = i;
+		mlx5_qps[i].device = &mlx5_dev;
+		TAILQ_INSERT_TAIL(&mlx5_dev.qpairs, &mlx5_qps[i], link);
+	}
+	for (i = 0; i < g_qat_total_qp; i++) {
+		qat_qps[i].index = i;
+		qat_qps[i].device = &qat_dev;
+		TAILQ_INSERT_TAIL(&qat_dev.qpairs, &qat_qps[i], link);
+	}
+
+	/* Swap g_crypto_devices so that other tests are not affected */
+	TAILQ_SWAP(&g_crypto_devices, &devs_tmp, accel_dpdk_cryptodev_device, link);
+
+	TAILQ_INSERT_TAIL(&g_crypto_devices, &qat_dev, link);
+	TAILQ_INSERT_TAIL(&g_crypto_devices, &aesni_dev, link);
+	TAILQ_INSERT_TAIL(&g_crypto_devices, &mlx5_dev, link);
 
 	/* QAT testing is more complex as the code under test load balances by
 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
-	 * each with 2 qp so the "spread" between assignments is 32.
-	 */
-	g_qat_total_qp = 96;
-	for (i = 0; i < g_qat_total_qp; i++) {
-		device_qp = calloc(1, sizeof(struct device_qp));
-		device_qp->index = i;
-		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
-	}
-	g_crypto_ch->device_qp = NULL;
-	g_crypto_bdev.opts->drv_name = QAT;
+	 * each with 2 qp so the "spread" between assignments is 32. */
 
 	/* First assignment will assign to 0 and next at 32. */
-	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
-			       0, QAT_VF_SPREAD);
+	_check_expected_values(&io_ch, 0, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD);
 
 	/* Second assignment will assign to 32 and next at 64. */
-	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
-			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
+	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD,
+			       ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2);
 
 	/* Third assignment will assign to 64 and next at 0. */
-	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
-			       QAT_VF_SPREAD * 2, 0);
+	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2, 0);
 
 	/* Fourth assignment will assign to 1 and next at 33. */
-	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
-			       1, QAT_VF_SPREAD + 1);
+	_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);
 
-	/* make sure that one MLX5 qp is found */
-	device_qp = calloc(1, sizeof(struct device_qp));
-	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
-	g_crypto_ch->device_qp = NULL;
-	g_crypto_bdev.opts->drv_name = MLX5;
-	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
-	CU_ASSERT(g_crypto_ch->device_qp == device_qp);
-
-	_clear_device_qp_lists();
+	TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);
 }
 
 int
@@ -1204,18 +1214,15 @@ main(int argc, char **argv)
 	CU_set_error_action(CUEA_ABORT);
 	CU_initialize_registry();
 
-	suite = CU_add_suite("crypto", test_setup, test_cleanup);
+	suite = CU_add_suite("dpdk_cryptodev", test_setup, test_cleanup);
 	CU_ADD_TEST(suite, test_error_paths);
-	CU_ADD_TEST(suite, test_simple_write);
-	CU_ADD_TEST(suite, test_simple_read);
-	CU_ADD_TEST(suite, test_large_rw);
+	CU_ADD_TEST(suite, test_simple_encrypt);
+	CU_ADD_TEST(suite, test_simple_decrypt);
+	CU_ADD_TEST(suite, test_large_enc_dec);
 	CU_ADD_TEST(suite, test_dev_full);
 	CU_ADD_TEST(suite, test_crazy_rw);
-	CU_ADD_TEST(suite, test_passthru);
 	CU_ADD_TEST(suite, test_initdrivers);
-	CU_ADD_TEST(suite, test_crypto_op_complete);
-	CU_ADD_TEST(suite, test_supported_io);
-	CU_ADD_TEST(suite, test_reset);
+	CU_ADD_TEST(suite, test_supported_opcodes);
 	CU_ADD_TEST(suite, test_poller);
 	CU_ADD_TEST(suite, test_assign_device_qp);