accel/dpdk_cryptodev: Remove queued_cry_ops

If we are not able to submit all of the configured
crypto ops, we can simply release the crypto ops
and mbuf objects of the unsubmitted ops and record
the actual number of submitted operations in the
accel task. Once all submitted operations complete,
the poller calls accel_dpdk_cryptodev_process_task()
to submit crypto operations for the remaining data
blocks. If no crypto ops were submitted at all, the
task is placed in the channel's queued_tasks list
and the poller retries the whole task.
This should improve performance: previously, queued
ops were resubmitted with burst size == 1, which is
inefficient.
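
For reference, a minimal standalone sketch of the new submission
policy described above. This is not the SPDK code itself: struct
task, enqueue_burst(), release_ops() and queue_task() are
hypothetical stand-ins for accel_dpdk_cryptodev_task,
rte_cryptodev_enqueue_burst() and the module's cleanup/queueing
helpers.

    #include <errno.h>

    struct task {
            unsigned int cryop_submitted; /* ops accepted by the device so far */
    };

    /* Hypothetical helpers standing in for the real module code. */
    unsigned int enqueue_burst(void **ops, unsigned int n); /* returns ops accepted */
    void release_ops(void **ops, unsigned int n); /* free crypto ops and their mbufs */
    void queue_task(struct task *t);              /* park task on queued_tasks */

    static int
    submit_batch(struct task *t, void **ops, unsigned int n)
    {
            unsigned int done = enqueue_burst(ops, n);

            /* Record only what the device actually accepted. */
            t->cryop_submitted += done;
            if (done == n) {
                    return 0;
            }
            /* Release the rejected tail in every case. */
            release_ops(&ops[done], n - done);
            if (done == 0) {
                    /* Nothing was accepted: queue the whole task; the
                     * poller resubmits it later with a full-size burst. */
                    queue_task(t);
                    return -ENOMEM;
            }
            /* Partial submission: once the accepted ops complete, the
             * poller calls process_task() for the remaining blocks. */
            return 0;
    }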

Fixes issue #2907

Signed-off-by: Alexey Marchuk <alexeymar@nvidia.com>
Change-Id: I4d17e8ed1ad5383848e4d09c46009c6cb2834360
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16784
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Alexey Marchuk, 2023-02-12 16:06:26 +01:00 (committed by Jim Harris)
commit 9c636a02f9
parent 8f4d98bb40
2 changed files with 56 additions and 168 deletions


@@ -66,7 +66,6 @@
 	(ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
 	sizeof(struct rte_crypto_sym_xform)))
 #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH			16
-#define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET (ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH)
 
 /* Driver names */
 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB	"crypto_aesni_mb"
@@ -145,15 +144,6 @@ struct accel_dpdk_cryptodev_key_priv {
 	TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
 };
 
-/* For queueing up crypto operations that we can't submit for some reason */
-struct accel_dpdk_cryptodev_queued_op {
-	struct accel_dpdk_cryptodev_qp *qp;
-	struct rte_crypto_op *crypto_op;
-	struct accel_dpdk_cryptodev_task *task;
-	TAILQ_ENTRY(accel_dpdk_cryptodev_queued_op) link;
-};
-#define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH (sizeof(struct accel_dpdk_cryptodev_queued_op))
-
 /* The crypto channel struct. It is allocated and freed on my behalf by the io channel code.
  * We store things in here that are needed on per thread basis like the base_channel for this thread,
  * and the poller for this thread.
@@ -163,8 +153,6 @@ struct accel_dpdk_cryptodev_io_channel {
 	struct spdk_poller *poller;
 	/* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
 	struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
-	/* queued for re-submission to CryptoDev. Used when for some reason crypto op was not processed by the driver */
-	TAILQ_HEAD(, accel_dpdk_cryptodev_queued_op) queued_cry_ops;
 	/* Used to queue tasks when qpair is full. No crypto operation was submitted to the driver by the task */
 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks;
 };
@@ -245,43 +233,6 @@ accel_dpdk_cryptodev_get_driver(void)
 	return g_driver_names[g_dpdk_cryptodev_driver];
 }
 
-static void
-cancel_queued_crypto_ops(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
-			 struct accel_dpdk_cryptodev_task *task)
-{
-	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
-	struct rte_crypto_op *cancelled_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
-	struct accel_dpdk_cryptodev_queued_op *op_to_cancel, *tmp_op;
-	struct rte_crypto_op *crypto_op;
-	int num_mbufs = 0, num_dequeued_ops = 0;
-
-	/* Remove all ops from the failed IO. Since we don't know the
-	 * order we have to check them all. */
-	TAILQ_FOREACH_SAFE(op_to_cancel, &crypto_ch->queued_cry_ops, link, tmp_op) {
-		/* Checking if this is our op. One IO contains multiple ops. */
-		if (task == op_to_cancel->task) {
-			crypto_op = op_to_cancel->crypto_op;
-			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_cancel, link);
-			/* Populating lists for freeing mbufs and ops. */
-			mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_src;
-			if (crypto_op->sym->m_dst) {
-				mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_dst;
-			}
-			cancelled_ops[num_dequeued_ops++] = crypto_op;
-		}
-	}
-
-	/* Now bulk free both mbufs and crypto operations. */
-	if (num_dequeued_ops > 0) {
-		rte_mempool_put_bulk(g_crypto_op_mp, (void **)cancelled_ops,
-				     num_dequeued_ops);
-		assert(num_mbufs > 0);
-		/* This also releases chained mbufs if any. */
-		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
-	}
-}
-
 static inline uint16_t
 accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
 			     struct accel_dpdk_cryptodev_io_channel *crypto_ch)
@@ -369,10 +320,8 @@ accel_dpdk_cryptodev_poller(void *args)
 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
 	struct accel_dpdk_cryptodev_qp *qp;
 	struct accel_dpdk_cryptodev_task *task, *task_tmp;
-	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit, *op_to_resubmit_tmp;
 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp;
 	uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0;
-	uint16_t enqueued;
 	int i, rc;
 
 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
@@ -383,42 +332,6 @@ accel_dpdk_cryptodev_poller(void *args)
 		}
 	}
 
-	/* Check if there are any queued crypto ops to process */
-	TAILQ_FOREACH_SAFE(op_to_resubmit, &crypto_ch->queued_cry_ops, link, op_to_resubmit_tmp) {
-		task = op_to_resubmit->task;
-		qp = op_to_resubmit->qp;
-		if (qp->num_enqueued_ops == qp->device->qp_desc_nr) {
-			continue;
-		}
-		enqueued = rte_cryptodev_enqueue_burst(qp->device->cdev_id,
-						       qp->qp,
-						       &op_to_resubmit->crypto_op,
-						       1);
-		if (enqueued == 1) {
-			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link);
-			qp->num_enqueued_ops++;
-			num_enqueued_ops++;
-		} else {
-			if (op_to_resubmit->crypto_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-				/* If we couldn't get one, just break and try again later. */
-				break;
-			} else {
-				/* Something is really wrong with the op. Most probably the
-				 * mbuf is broken or the HW is not able to process the request.
-				 * Fail the IO and remove its ops from the queued ops list. */
-				task->is_failed = true;
-				cancel_queued_crypto_ops(crypto_ch, task);
-				task->cryop_completed++;
-				/* Fail the IO if there is nothing left on device. */
-				if (task->cryop_completed == task->cryop_submitted) {
-					spdk_accel_task_complete(&task->base, -EFAULT);
-				}
-			}
-		}
-	}
-
 	if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) {
 		TAILQ_INIT(&queued_tasks_tmp);
@@ -639,7 +552,6 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	uint32_t sgl_offset;
 	uint32_t qp_capacity;
 	uint64_t iv_start;
-	struct accel_dpdk_cryptodev_queued_op *op_to_queue;
 	uint32_t i, crypto_index;
 	struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
 	struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
@@ -729,8 +641,6 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	if (rc) {
 		return rc;
 	}
-	/* This value is used in the completion callback to determine when the accel task is complete. */
-	task->cryop_submitted += cryop_cnt;
 
 	/* As we don't support chaining because of a decision to use LBA as IV, construction
 	 * of crypto operations is straightforward. We build both the op, the mbuf and the
@@ -749,7 +659,7 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
 		rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
 		if (spdk_unlikely(rc)) {
-			goto err_free_ops;
+			goto free_ops;
 		}
 		accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
 		iv_start++;
@@ -769,7 +679,7 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 			/* scan-build thinks that dst_mbufs is not initialized */
 			rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
 			if (spdk_unlikely(rc)) {
-				goto err_free_ops;
+				goto free_ops;
 			}
 			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
 #endif
@@ -780,7 +690,8 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	 * configured the crypto device for.
 	 */
 	num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt);
+	/* This value is used in the completion callback to determine when the accel task is complete. */
+	task->cryop_submitted += num_enqueued_ops;
 	qp->num_enqueued_ops += num_enqueued_ops;
 
 	/* We were unable to enqueue everything but did get some, so need to decide what
 	 * to do based on the status of the last op.
@@ -788,17 +699,22 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	if (num_enqueued_ops < cryop_cnt) {
 		switch (crypto_ops[num_enqueued_ops]->status) {
 		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
-			/* Queue them up on a linked list to be resubmitted via the poller. */
-			for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) {
-				op_to_queue = (struct accel_dpdk_cryptodev_queued_op *)rte_crypto_op_ctod_offset(
-						      crypto_ops[crypto_index],
-						      uint8_t *, ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
-				op_to_queue->qp = qp;
-				op_to_queue->crypto_op = crypto_ops[crypto_index];
-				op_to_queue->task = task;
-				TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops, op_to_queue, link);
+			if (num_enqueued_ops == 0) {
+				/* Nothing was submitted. Free crypto ops and mbufs, treat this case as NOMEM */
+				rc = -ENOMEM;
+				goto free_ops;
 			}
-			break;
+			/* Part of the crypto operations were not submitted, release mbufs and crypto ops.
+			 * The rest crypto ops will be submitted again once current batch is completed */
+			cryop_cnt -= num_enqueued_ops;
+			memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt);
+			memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt);
+			if (!task->inplace) {
+				memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt);
+			}
+			rc = 0;
+			goto free_ops;
 		default:
 			/* For all other statuses, mark task as failed so that the poller will pick
 			 * the failure up for the overall task status.
@@ -809,7 +725,7 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 			 * busy, fail it now as the poller won't know anything about it.
 			 */
 			rc = -EINVAL;
-			goto err_free_ops;
+			goto free_ops;
 		}
 		break;
 	}
@@ -818,7 +734,7 @@ accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto
 	return 0;
 
 	/* Error cleanup paths. */
-err_free_ops:
+free_ops:
 	if (!task->inplace) {
 		/* This also releases chained mbufs if any. */
 		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
@@ -939,8 +855,6 @@ _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
 		return -EINVAL;
 	}
 
-	/* We use this to queue up crypto ops when the device is busy. */
-	TAILQ_INIT(&crypto_ch->queued_cry_ops);
 	/* We use this to queue tasks when qpair is full or no resources in pools */
 	TAILQ_INIT(&crypto_ch->queued_tasks);
@@ -1248,7 +1162,7 @@ accel_dpdk_cryptodev_init(void)
 	g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
 			 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
 			 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
-			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH, rte_socket_id());
+			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id());
 	if (g_crypto_op_mp == NULL) {
 		SPDK_ERRLOG("Cannot create op pool\n");
 		rc = -ENOMEM;


@@ -295,7 +295,6 @@ test_setup(void)
 	/* Prepare essential variables for test routines */
 	g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel));
 	g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
-	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
 	TAILQ_INIT(&g_crypto_ch->queued_tasks);
 
 	g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
@@ -322,14 +321,12 @@
 	 * same coverage just calloc them here.
 	 */
 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
-		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH +
-			      ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH;
+		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
 		if (rc != 0) {
 			assert(false);
 		}
-		memset(g_test_crypto_ops[i], 0,
-		       ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH);
+		memset(g_test_crypto_ops[i], 0, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
 	}
 
 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
@@ -709,7 +706,8 @@ test_large_enc_dec(void)
 
 	/* Test 1. Multi block size decryption, multi-element, inplace */
 	g_aesni_qp.num_enqueued_ops = 0;
-	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
+		ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;
 
 	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
 	CU_ASSERT(rc == 0);
@@ -738,7 +736,7 @@
 	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(task.cryop_submitted == num_blocks);
+	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
 	CU_ASSERT(task.cryop_total == task.cryop_submitted);
 
 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
@@ -757,9 +755,10 @@
 
 	/* Test 2. Multi block size decryption, multi-element, out-of-place */
 	g_aesni_qp.num_enqueued_ops = 0;
-	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
 	/* Modify dst to make payload out-of-place */
 	dst_iov[0].iov_base -= 1;
+	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
+		ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;
 
 	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
 	CU_ASSERT(rc == 0);
@@ -792,7 +791,7 @@
 	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(task.cryop_submitted == num_blocks);
+	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
 	CU_ASSERT(task.cryop_total == task.cryop_submitted);
 
 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
@@ -817,9 +816,10 @@
 	g_aesni_qp.num_enqueued_ops = 0;
 	task.base.op_code = ACCEL_OPC_ENCRYPT;
 	task.cryop_submitted = 0;
-	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
 	/* Modify dst to make payload iplace */
 	dst_iov[0].iov_base += 1;
+	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
+		ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;
 
 	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
 	CU_ASSERT(rc == 0);
@@ -848,7 +848,7 @@
 	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(task.cryop_submitted == num_blocks);
+	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
 	CU_ASSERT(task.cryop_total == task.cryop_submitted);
 
 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
@@ -868,9 +868,10 @@
 	/* Multi block size encryption, multi-element, out-of-place */
 	g_aesni_qp.num_enqueued_ops = 0;
 	task.cryop_submitted = 0;
-	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
 	/* Modify dst to make payload out-of-place */
 	dst_iov[0].iov_base -= 1;
+	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
+		ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;
 
 	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
 	CU_ASSERT(task.inplace == false);
@@ -902,7 +903,7 @@
 	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(task.cryop_submitted == num_blocks);
+	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
 	CU_ASSERT(task.cryop_total == task.cryop_submitted);
 
 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
@@ -928,7 +929,6 @@ static void
 test_dev_full(void)
 {
 	struct accel_dpdk_cryptodev_task task = {};
-	struct accel_dpdk_cryptodev_queued_op *queued_op;
 	struct rte_crypto_sym_op *sym_op;
 	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
 	struct iovec dst_iov = src_iov;
@@ -943,17 +943,15 @@ test_dev_full(void)
 	task.base.crypto_key = &g_key;
 	task.base.iv = 1;
 
-	/* Two element block size decryption */
+	/* Two element block size decryption, 2nd op was not submitted */
 	g_aesni_qp.num_enqueued_ops = 0;
 	g_enqueue_mock = g_dequeue_mock = 1;
 	ut_rte_crypto_op_bulk_alloc = 2;
 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
 
 	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(task.cryop_submitted == 2);
+	CU_ASSERT(task.cryop_submitted == 1);
 	sym_op = g_test_crypto_ops[0]->sym;
 	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
 	CU_ASSERT(sym_op->m_src->data_len == 512);
@@ -962,24 +960,24 @@
 	CU_ASSERT(sym_op->cipher.data.offset == 0);
 	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
 	CU_ASSERT(sym_op->m_dst == NULL);
-
-	/* make sure one got queued and confirm its values */
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
-	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
-	sym_op = queued_op->crypto_op->sym;
-	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
-	CU_ASSERT(queued_op->task == &task);
-	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
-	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF + 512);
-	CU_ASSERT(sym_op->m_src->data_len == 512);
-	CU_ASSERT(sym_op->m_src->next == NULL);
-	CU_ASSERT(sym_op->cipher.data.length == 512);
-	CU_ASSERT(sym_op->cipher.data.offset == 0);
-	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
-	CU_ASSERT(sym_op->m_dst == NULL);
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
+	/* op which was not submitted is already released */
 	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
-	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);
+	task.cryop_submitted = 0;
+
+	/* Two element block size decryption, no ops were submitted, task should be queued */
+	g_aesni_qp.num_enqueued_ops = 0;
+	g_enqueue_mock = g_dequeue_mock = 0;
+	ut_rte_crypto_op_bulk_alloc = 2;
+	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
+	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(task.cryop_submitted == 0);
+	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
+	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
+	TAILQ_INIT(&g_crypto_ch->queued_tasks);
 
 	/* Non-busy reason for enqueue failure, all were rejected. */
 	g_enqueue_mock = 0;
@@ -995,6 +993,7 @@ test_dev_full(void)
 	CU_ASSERT(rc == 0);
 	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
 	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
+	g_aesni_qp.num_enqueued_ops = 0;
 	TAILQ_INIT(&g_crypto_ch->queued_tasks);
 }
@@ -1278,7 +1277,6 @@
 	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
 	struct iovec dst_iov = src_iov;
 	struct rte_mbuf *src_mbufs[2];
-	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
 	int rc;
 
 	task.base.op_code = ACCEL_OPC_DECRYPT;
@@ -1308,30 +1306,6 @@
 	CU_ASSERT(task.cryop_completed == task.cryop_submitted);
 	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);
 
-	/* We have nothing dequeued but have some to resubmit */
-	g_dequeue_mock = 0;
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
-
-	/* add an op to the queued list. */
-	task.cryop_submitted = 1;
-	task.cryop_total = 1;
-	task.cryop_completed = 0;
-	g_resubmit_test = true;
-	op_to_resubmit = (struct accel_dpdk_cryptodev_queued_op *)((uint8_t *)g_test_crypto_ops[0] +
-			 ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
-	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
-	op_to_resubmit->task = &task;
-	op_to_resubmit->qp = &g_aesni_qp;
-	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
-			  op_to_resubmit,
-			  link);
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
-	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
-	g_resubmit_test = false;
-	CU_ASSERT(rc == 1);
-	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
-	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
-
 	/* 2 to dequeue but 2nd one failed */
 	g_dequeue_mock = g_enqueue_mock = 2;
 	g_aesni_qp.num_enqueued_ops = 2;