dpdk_cryptodev: Remove limit on max IO size

Previously, vbdev_crypto used DPDK directly and
its restriction on the max IO size was propagated
to the generic bdev layer, which split big IO
requests.

Now that the DPDK code is a standalone accel
module, this restriction on the max IO size is no
longer visible to the user, so we should get rid
of it.

To remove this limitation, allow submitting
crypto operations for only part of the logical
blocks in a big IO; the remaining blocks are
processed once all previously submitted crypto
ops have completed.
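
In accel_dpdk_cryptodev_process_task() this means
capping each submission at
ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE ops
and resuming from the last completed block. A
condensed sketch of that logic (simplified from
the diff below, not the verbatim code):

    if (task->cryop_completed) {
        /* Resuming: skip the blocks that are already done */
        cryop_cnt = task->cryop_total - task->cryop_completed;
        sgl_offset = task->cryop_completed * task->base.block_size;
        iv_start = task->base.iv + task->cryop_completed;
    } else {
        /* New task: one crypto op per logical block */
        cryop_cnt = total_length / task->base.block_size;
        task->cryop_total = cryop_cnt;
        sgl_offset = 0;
        iv_start = task->base.iv;
    }
    /* Cap the number of ops submitted in one pass */
    cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);

When the poller later observes that
cryop_completed == cryop_submitted while
cryop_completed < cryop_total, it calls
accel_dpdk_cryptodev_process_task() again to
submit the next batch.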

To verify this patch, add a functional test which
submits big (64 KiB) IOs in verify mode.

Signed-off-by: Alexey Marchuk <alexeymar@nvidia.com>
Change-Id: I0ee89e98195a5c744f3fb2bfc752b578965c3bc5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15768
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Alexey Marchuk 2022-12-02 13:10:44 +01:00, committed by Ben Walker
parent 687d5a8766
commit bf8e0656e8
4 changed files with 347 additions and 112 deletions

View File

@ -26,9 +26,6 @@
*/
#define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD 32
/* Max length in byte of a crypto operation */
#define ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO (64 * 1024)
/* This controls how many ops will be dequeued from the crypto driver in one run
* of the poller. It is mainly a performance knob as it effectively determines how
* much work the poller has to do. However even that can vary between crypto drivers
@ -37,15 +34,7 @@
*/
#define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE 64
/* When enqueueing, we need to supply the crypto driver with an array of pointers to
* operation structs. As each of these can be max 512B, we can adjust the ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO
* value in conjunction with the other defines to make sure we're not using crazy amounts
* of memory. All of these numbers can and probably should be adjusted based on the
* workload. By default we'll use the worst case (smallest) block size for the
* minimum number of array entries. As an example, a ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO size of 64K with 512B
* blocks would give us an enqueue array size of 128.
*/
#define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / 512)
#define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128)
/* The number of MBUFS we need must be a power of two and to support other small IOs
* in addition to the limits mentioned above, we go to the next power of two. It is
@ -96,6 +85,10 @@
#define ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH 16 /* XTS part key size is always 128 bit. */
/* Limit of the max memory len attached to mbuf - rte_pktmbuf_attach_extbuf has a uint16_t `buf_len`
 * parameter, so we use the closest aligned value, 32768, for better performance */
#define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN 32768
/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
.name = "context_accel_dpdk_cryptodev",
@ -175,8 +168,11 @@ struct accel_dpdk_cryptodev_io_channel {
struct accel_dpdk_cryptodev_task {
struct spdk_accel_task base;
uint32_t cryop_cnt_remaining;
uint32_t cryop_completed; /* The number of crypto operations completed by HW */
uint32_t cryop_submitted; /* The number of crypto operations submitted to HW */
uint32_t cryop_total; /* Total number of crypto operations in this task */
bool is_failed;
bool inplace;
TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
};
@ -212,6 +208,9 @@ static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;
static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;
static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
struct accel_dpdk_cryptodev_task *task);
void
accel_dpdk_cryptodev_enable(void)
{
@ -281,7 +280,8 @@ cancel_queued_crypto_ops(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
}
static inline uint16_t
accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp)
accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
struct accel_dpdk_cryptodev_io_channel *crypto_ch)
{
struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
@ -325,11 +325,17 @@ accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp)
mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
}
assert(task->cryop_cnt_remaining > 0);
/* done encrypting, complete the task */
if (--task->cryop_cnt_remaining == 0) {
task->cryop_completed++;
if (task->cryop_completed == task->cryop_total) {
/* Complete the IO */
spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
} else if (task->cryop_completed == task->cryop_submitted) {
/* submit remaining crypto ops */
int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
if (spdk_unlikely(rc)) {
spdk_accel_task_complete(&task->base, rc);
}
}
}
@ -365,7 +371,7 @@ accel_dpdk_cryptodev_poller(void *args)
qp = crypto_ch->device_qp[i];
/* Avoid polling "idle" qps since it may affect performance */
if (qp && qp->num_enqueued_ops) {
num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp);
num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch);
}
}
@ -394,8 +400,9 @@ accel_dpdk_cryptodev_poller(void *args)
cancel_queued_crypto_ops(crypto_ch, task);
task->cryop_completed++;
/* Fail the IO if there is nothing left on device. */
if (--task->cryop_cnt_remaining == 0) {
if (task->cryop_completed == task->cryop_submitted) {
spdk_accel_task_complete(&task->base, -EFAULT);
}
}
@ -421,6 +428,7 @@ accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task
return -EFAULT;
}
remainder = spdk_min(remainder, phys_len);
remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
if (spdk_unlikely(rc)) {
return -ENOMEM;
@ -454,6 +462,7 @@ accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, str
return 0;
}
assert(phys_len <= len);
phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
/* Set the mbuf elements address and length. */
rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
@ -515,11 +524,12 @@ accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rt
0x1000);
#endif
/* Allocate crypto operations. */
SPDK_NOTICELOG("requested %u ops\n", count);
rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
crypto_ops, count);
if (rc < count) {
SPDK_ERRLOG("Failed to allocate crypto ops!\n");
SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc);
goto err_free_ops;
}
@ -592,7 +602,8 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
uint32_t cryop_cnt;
uint32_t crypto_len = task->base.block_size;
uint64_t dst_length, total_length;
uint64_t iv_start = task->base.iv;
uint32_t sgl_offset;
uint64_t iv_start;
struct accel_dpdk_cryptodev_queued_op *op_to_queue;
uint32_t i, crypto_index;
struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
@ -604,7 +615,6 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
struct accel_dpdk_cryptodev_qp *qp;
struct accel_dpdk_cryptodev_device *dev;
struct spdk_iov_sgl src, dst = {};
bool inplace = true;
int rc;
if (spdk_unlikely(!task->base.crypto_key ||
@ -612,30 +622,43 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
return -EINVAL;
}
total_length = 0;
for (i = 0; i < task->base.s.iovcnt; i++) {
total_length += task->base.s.iovs[i].iov_len;
}
dst_length = 0;
for (i = 0; i < task->base.d.iovcnt; i++) {
dst_length += task->base.d.iovs[i].iov_len;
}
if (spdk_unlikely(total_length != dst_length || !total_length)) {
return -ERANGE;
}
if (spdk_unlikely(total_length % task->base.block_size != 0)) {
return -EINVAL;
}
priv = task->base.crypto_key->priv;
assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);
if (total_length > ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO) {
return -E2BIG;
if (task->cryop_completed) {
/* We continue to process remaining blocks */
assert(task->cryop_submitted == task->cryop_completed);
assert(task->cryop_total > task->cryop_completed);
cryop_cnt = task->cryop_total - task->cryop_completed;
sgl_offset = task->cryop_completed * crypto_len;
iv_start = task->base.iv + task->cryop_completed;
} else {
/* This is a new task */
total_length = 0;
for (i = 0; i < task->base.s.iovcnt; i++) {
total_length += task->base.s.iovs[i].iov_len;
}
dst_length = 0;
for (i = 0; i < task->base.d.iovcnt; i++) {
dst_length += task->base.d.iovs[i].iov_len;
}
if (spdk_unlikely(total_length != dst_length || !total_length)) {
return -ERANGE;
}
if (spdk_unlikely(total_length % task->base.block_size != 0)) {
return -EINVAL;
}
cryop_cnt = total_length / task->base.block_size;
task->cryop_total = cryop_cnt;
sgl_offset = 0;
iv_start = task->base.iv;
}
cryop_cnt = total_length / task->base.block_size;
/* Limit the number of crypto ops that we can process at once */
cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
qp = crypto_ch->device_qp[priv->driver];
assert(qp);
dev = qp->device;
@ -658,23 +681,13 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
return -EINVAL;
}
/* Check if crypto operation is inplace: no destination or source == destination */
if (task->base.s.iovcnt == task->base.d.iovcnt) {
if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
inplace = false;
}
} else if (task->base.d.iovcnt != 0) {
inplace = false;
}
rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs, crypto_ops,
cryop_cnt);
rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, task->inplace ? NULL : dst_mbufs,
crypto_ops, cryop_cnt);
if (rc) {
return rc;
}
/* This value is used in the completion callback to determine when the accel task is complete.
*/
task->cryop_cnt_remaining = cryop_cnt;
/* This value is used in the completion callback to determine when the accel task is complete. */
task->cryop_submitted += cryop_cnt;
/* As we don't support chaining because of a decision to use LBA as IV, construction
* of crypto operations is straightforward. We build both the op, the mbuf and the
@ -683,9 +696,9 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
* LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
* mbuf per crypto operation.
*/
spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
if (!inplace) {
spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, sgl_offset);
if (!task->inplace) {
spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, sgl_offset);
}
for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
@ -704,14 +717,17 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
/* link the mbuf to the crypto op. */
crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
if (inplace) {
if (task->inplace) {
crypto_ops[crypto_index]->sym->m_dst = NULL;
} else {
#ifndef __clang_analyzer__
/* scan-build thinks that dst_mbufs is not initialized */
rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
if (spdk_unlikely(rc)) {
goto err_free_ops;
}
crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
#endif
}
}
@ -759,7 +775,7 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
/* Error cleanup paths. */
err_free_ops:
if (!inplace) {
if (!task->inplace) {
/* This also releases chained mbufs if any. */
rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
}
@ -916,6 +932,21 @@ accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel
base);
struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch);
task->cryop_completed = 0;
task->cryop_submitted = 0;
task->cryop_total = 0;
task->inplace = true;
task->is_failed = false;
/* Check if crypto operation is inplace: no destination or source == destination */
if (task->base.s.iovcnt == task->base.d.iovcnt) {
if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
task->inplace = false;
}
} else if (task->base.d.iovcnt != 0) {
task->inplace = false;
}
return accel_dpdk_cryptodev_process_task(ch, task);
}

View File

@ -1,6 +1,7 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2016 Intel Corporation
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
@ -660,6 +661,8 @@ fi
trap "cleanup" SIGINT SIGTERM EXIT
run_test "bdev_verify" $rootdir/build/examples/bdevperf --json "$conf_file" -q 128 -o 4096 -w verify -t 5 -C -m 0x3 "$env_ctx"
# TODO: increase queue depth to 128 once issue #2824 is fixed
run_test "bdev_verify_big_io" $rootdir/build/examples/bdevperf --json "$conf_file" -q 16 -o 65536 -w verify -t 5 -C -m 0x3 "$env_ctx"
run_test "bdev_write_zeroes" $rootdir/build/examples/bdevperf --json "$conf_file" -q 128 -o 4096 -w write_zeroes -t 1 "$env_ctx"
# test json config not enclosed with {}

View File

@ -393,30 +393,24 @@ test_error_paths(void)
CU_ASSERT(rc == -EINVAL);
key.module_if = &g_accel_dpdk_cryptodev_module;
/* case 3 - buffers are too big */
dst_iov.iov_len = src_iov.iov_len = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO + 512;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == -E2BIG);
dst_iov.iov_len = src_iov.iov_len = 512;
/* case 4 - no key handle in the channel */
/* case 3 - no key handle in the channel */
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == -EINVAL);
task.base.crypto_key = &g_key;
/* case 5 - invalid op */
/* case 4 - invalid op */
task.base.op_code = ACCEL_OPC_COMPARE;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == -EINVAL);
task.base.op_code = ACCEL_OPC_ENCRYPT;
/* case 6 - no entries in g_mbuf_mp */
/* case 5 - no entries in g_mbuf_mp */
MOCK_SET(spdk_mempool_get, NULL);
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == -ENOMEM);
MOCK_CLEAR(spdk_mempool_get);
/* case 7 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
/* case 6 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == -EFAULT);
@ -429,7 +423,7 @@ test_simple_encrypt(void)
struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
struct iovec dst_iov = src_iov[0];
struct accel_dpdk_cryptodev_task task = {};
struct rte_mbuf *mbuf;
struct rte_mbuf *mbuf, *next;
int rc, i;
task.base.op_code = ACCEL_OPC_ENCRYPT;
@ -445,7 +439,7 @@ test_simple_encrypt(void)
/* Inplace encryption */
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
@ -458,12 +452,12 @@ test_simple_encrypt(void)
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
/* out-of-place encryption */
task.cryop_cnt_remaining = 0;
task.cryop_submitted = 0;
dst_iov.iov_base = (void *)0xFEEDBEEF;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
@ -485,18 +479,19 @@ test_simple_encrypt(void)
}
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
mbuf = g_test_crypto_ops[0]->sym->m_src;
CU_ASSERT(mbuf != NULL);
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
mbuf = mbuf->next;
for (i = 1; i < 4; i++) {
mbuf = mbuf->next;
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
next = mbuf->next;
rte_pktmbuf_free(mbuf);
mbuf = next;
}
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
@ -507,6 +502,37 @@ test_simple_encrypt(void)
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
/* Big logical block size, inplace encryption */
src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
dst_iov = src_iov[0];
task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
task.base.s.iovcnt = 1;
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == 1);
mbuf = g_test_crypto_ops[0]->sym->m_src;
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
mbuf = mbuf->next;
for (i = 1; i < 4; i++) {
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
next = mbuf->next;
rte_pktmbuf_free(mbuf);
mbuf = next;
}
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
static void
@ -515,7 +541,7 @@ test_simple_decrypt(void)
struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
struct iovec dst_iov = src_iov[0];
struct accel_dpdk_cryptodev_task task = {};
struct rte_mbuf *mbuf;
struct rte_mbuf *mbuf, *next;
int rc, i;
task.base.op_code = ACCEL_OPC_DECRYPT;
@ -531,7 +557,7 @@ test_simple_decrypt(void)
/* Inplace decryption */
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
@ -544,12 +570,12 @@ test_simple_decrypt(void)
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
/* out-of-place decryption */
task.cryop_cnt_remaining = 0;
task.cryop_submitted = 0;
dst_iov.iov_base = (void *)0xFEEDBEEF;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
@ -571,18 +597,19 @@ test_simple_decrypt(void)
}
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 1);
CU_ASSERT(task.cryop_submitted == 1);
mbuf = g_test_crypto_ops[0]->sym->m_src;
CU_ASSERT(mbuf != NULL);
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
mbuf = mbuf->next;
for (i = 1; i < 4; i++) {
mbuf = mbuf->next;
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
next = mbuf->next;
rte_pktmbuf_free(mbuf);
mbuf = next;
}
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
@ -593,6 +620,37 @@ test_simple_decrypt(void)
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
/* Big logical block size, inplace decryption */
src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
dst_iov = src_iov[0];
task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
task.base.s.iovcnt = 1;
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == 1);
mbuf = g_test_crypto_ops[0]->sym->m_src;
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
mbuf = mbuf->next;
for (i = 1; i < 4; i++) {
SPDK_CU_ASSERT_FATAL(mbuf != NULL);
CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
next = mbuf->next;
rte_pktmbuf_free(mbuf);
mbuf = next;
}
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
static void
@ -600,9 +658,10 @@ test_large_enc_dec(void)
{
struct accel_dpdk_cryptodev_task task = {};
uint32_t block_len = 512;
uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / block_len;
struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO };
uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2;
struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = num_blocks * block_len };
struct iovec dst_iov = src_iov;
uint32_t iov_offset = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * block_len;
uint32_t i;
int rc;
@ -615,14 +674,17 @@ test_large_enc_dec(void)
task.base.crypto_key = &g_key;
task.base.iv = 1;
/* Multi block size decryption, multi-element, inplace */
/* Test 1. Multi block size decryption, multi-element, inplace */
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
CU_ASSERT(task.inplace == true);
CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
CU_ASSERT(task.cryop_total == num_blocks);
CU_ASSERT(task.cryop_completed == 0);
for (i = 0; i < num_blocks; i++) {
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
@ -634,15 +696,39 @@ test_large_enc_dec(void)
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
}
/* Multi block size decryption, multi-element, out-of-place */
task.cryop_cnt_remaining = 0;
/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
task.cryop_completed = task.cryop_submitted;
rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == num_blocks);
CU_ASSERT(task.cryop_total == task.cryop_submitted);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
}
/* Test 2. Multi block size decryption, multi-element, out-of-place */
dst_iov.iov_base = (void *)0xFEEDBEEF;
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
for (i = 0; i < num_blocks; i++) {
CU_ASSERT(task.inplace == false);
CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
CU_ASSERT(task.cryop_total == num_blocks);
CU_ASSERT(task.cryop_completed == 0);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
@ -657,17 +743,45 @@ test_large_enc_dec(void)
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
}
/* Multi block size encryption, multi-element, inplace */
/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
task.cryop_completed = task.cryop_submitted;
rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == num_blocks);
CU_ASSERT(task.cryop_total == task.cryop_submitted);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
}
/* Test 3. Multi block size encryption, multi-element, inplace */
dst_iov = src_iov;
task.base.op_code = ACCEL_OPC_ENCRYPT;
task.cryop_cnt_remaining = 0;
task.cryop_submitted = 0;
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
CU_ASSERT(task.inplace == true);
CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
CU_ASSERT(task.cryop_total == num_blocks);
CU_ASSERT(task.cryop_completed == 0);
for (i = 0; i < num_blocks; i++) {
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
@ -679,15 +793,39 @@ test_large_enc_dec(void)
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
}
/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
task.cryop_completed = task.cryop_submitted;
rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == num_blocks);
CU_ASSERT(task.cryop_total == task.cryop_submitted);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
}
/* Multi block size encryption, multi-element, out-of-place */
task.cryop_cnt_remaining = 0;
task.cryop_submitted = 0;
dst_iov.iov_base = (void *)0xFEEDBEEF;
g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
for (i = 0; i < num_blocks; i++) {
CU_ASSERT(task.inplace == false);
CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
CU_ASSERT(task.cryop_total == num_blocks);
CU_ASSERT(task.cryop_completed == 0);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
@ -701,6 +839,31 @@ test_large_enc_dec(void)
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
}
/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
task.cryop_completed = task.cryop_submitted;
rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_submitted == num_blocks);
CU_ASSERT(task.cryop_total == task.cryop_submitted);
for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + iov_offset +
(i * block_len));
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
}
}
static void
@ -731,7 +894,7 @@ test_dev_full(void)
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == 2);
CU_ASSERT(task.cryop_submitted == 2);
sym_op = g_test_crypto_ops[0]->sym;
CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
CU_ASSERT(sym_op->m_src->data_len == 512);
@ -793,7 +956,7 @@ test_crazy_rw(void)
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
CU_ASSERT(task.cryop_submitted == num_blocks);
for (i = 0; i < num_blocks; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
@ -810,7 +973,7 @@ test_crazy_rw(void)
/* Multi block size write, single element strange IOV makeup */
num_blocks = 8;
task.base.op_code = ACCEL_OPC_ENCRYPT;
task.cryop_cnt_remaining = 0;
task.cryop_submitted = 0;
task.base.s.iovcnt = 4;
task.base.d.iovcnt = 4;
task.base.s.iovs[0].iov_len = 2048;
@ -826,7 +989,7 @@ test_crazy_rw(void)
rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
CU_ASSERT(rc == 0);
CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
CU_ASSERT(task.cryop_submitted == num_blocks);
for (i = 0; i < num_blocks; i++) {
CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
@ -1040,6 +1203,8 @@ test_poller(void)
struct accel_dpdk_cryptodev_task task = {};
struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
struct iovec dst_iov = src_iov;
struct rte_mbuf *src_mbufs[2];
struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
int rc;
task.base.op_code = ACCEL_OPC_DECRYPT;
@ -1050,9 +1215,7 @@ test_poller(void)
task.base.block_size = 512;
task.base.crypto_key = &g_key;
task.base.iv = 1;
struct rte_mbuf *src_mbufs[2];
struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
task.inplace = true;
/* test regular 1 op to dequeue and complete */
g_dequeue_mock = g_enqueue_mock = 1;
@ -1061,16 +1224,22 @@ test_poller(void)
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) = (uintptr_t)&task;
g_test_crypto_ops[0]->sym->m_dst = NULL;
task.cryop_cnt_remaining = 1;
task.cryop_submitted = 1;
task.cryop_total = 1;
task.cryop_completed = 0;
task.base.op_code = ACCEL_OPC_DECRYPT;
rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
CU_ASSERT(rc == 1);
CU_ASSERT(task.cryop_completed == task.cryop_submitted);
/* We have nothing dequeued but have some to resubmit */
g_dequeue_mock = 0;
CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
/* add an op to the queued list. */
task.cryop_submitted = 1;
task.cryop_total = 1;
task.cryop_completed = 0;
g_resubmit_test = true;
op_to_resubmit = (struct accel_dpdk_cryptodev_queued_op *)((uint8_t *)g_test_crypto_ops[0] +
ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
@ -1088,7 +1257,9 @@ test_poller(void)
/* 2 to dequeue but 2nd one failed */
g_dequeue_mock = g_enqueue_mock = 2;
task.cryop_cnt_remaining = 2;
task.cryop_submitted = 2;
task.cryop_total = 2;
task.cryop_completed = 0;
rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
@ -1103,6 +1274,32 @@ test_poller(void)
rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
CU_ASSERT(task.is_failed == true);
CU_ASSERT(rc == 1);
/* Dequeue a task which needs to be submitted again */
g_dequeue_mock = g_enqueue_mock = ut_rte_crypto_op_bulk_alloc = 1;
task.cryop_submitted = 1;
task.cryop_total = 2;
task.cryop_completed = 0;
rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
SPDK_CU_ASSERT_FATAL(src_mbufs[0] != NULL);
g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) = (uintptr_t)&task;
g_test_crypto_ops[0]->sym->m_dst = NULL;
rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
CU_ASSERT(rc == 1);
CU_ASSERT(task.cryop_submitted == 2);
CU_ASSERT(task.cryop_total == 2);
CU_ASSERT(task.cryop_completed == 1);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
uint64_t *) == (uint64_t)&task);
CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
@ -1195,6 +1392,8 @@ test_assign_device_qp(void)
_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);
TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);
free(qat_qps);
}
int

View File

@ -1,6 +1,7 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2018 Intel Corporation
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Environment variables:
@ -206,6 +207,7 @@ run_test "unittest_include" $valgrind $testdir/include/spdk/histogram_data.h/his
run_test "unittest_bdev" unittest_bdev
if grep -q '#define SPDK_CONFIG_CRYPTO 1' $rootdir/include/spdk/config.h; then
run_test "unittest_bdev_crypto" $valgrind $testdir/lib/bdev/crypto.c/crypto_ut
run_test "unittest_bdev_crypto" $valgrind $testdir/lib/accel/dpdk_cryptodev.c/accel_dpdk_cryptodev_ut
fi
if grep -q '#define SPDK_CONFIG_VBDEV_COMPRESS 1' $rootdir/include/spdk/config.h; then