/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "spdk_internal/accel_module.h"
#include "thread/thread_internal.h"

#include "common/lib/ut_multithread.c"
|
2022-08-08 20:51:25 +00:00
|
|
|
#include "accel/accel.c"
|
2022-08-05 19:54:16 +00:00
|
|
|
#include "accel/accel_sw.c"
|
2022-01-26 01:29:39 +00:00
|
|
|
#include "unit/lib/json_mock.c"
|
2021-02-08 22:56:19 +00:00
|
|
|
|
2021-09-08 21:29:35 +00:00
|
|
|
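/* Stubs for the libpmem calls made by the software module when PMDK support is enabled,
 * so this test links without a real PMDK installation. */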
#ifdef SPDK_CONFIG_PMDK
DEFINE_STUB(pmem_msync, int, (const void *addr, size_t len), 0);
DEFINE_STUB(pmem_memcpy_persist, void *, (void *pmemdest, const void *src, size_t len), NULL);
DEFINE_STUB(pmem_is_pmem, int, (const void *addr, size_t len), 0);
DEFINE_STUB(pmem_memset_persist, void *, (void *pmemdest, int c, size_t len), NULL);
#endif

DEFINE_STUB(spdk_memory_domain_create, int,
            (struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
             struct spdk_memory_domain_ctx *ctx, const char *id), 0);
DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *domain));

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_accel_module_if g_module = {};
struct spdk_io_channel *g_ch = NULL;
struct accel_io_channel *g_accel_ch = NULL;
struct sw_accel_io_channel *g_sw_ch = NULL;
struct spdk_io_channel *g_module_ch = NULL;

static uint64_t g_opc_mask = 0;

static uint64_t
_accel_op_to_bit(enum accel_opcode opc)
{
        return (1 << opc);
}

static bool
_supports_opcode(enum accel_opcode opc)
{
        if (_accel_op_to_bit(opc) & g_opc_mask) {
                return true;
        }

        return false;
}

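/* test_setup() hand-crafts the accel and software-module I/O channels (instead of going
 * through spdk_accel_initialize()) and points every opcode at g_module. */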
static int
test_setup(void)
{
        int i;

        g_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct accel_io_channel));
        if (g_ch == NULL) {
                /* for some reason the assert fatal macro doesn't work in the setup function. */
                CU_ASSERT(false);
                return -1;
        }
        g_accel_ch = (struct accel_io_channel *)((char *)g_ch + sizeof(struct spdk_io_channel));
        g_module_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct sw_accel_io_channel));
        if (g_module_ch == NULL) {
                CU_ASSERT(false);
                return -1;
        }

        g_module.submit_tasks = sw_accel_submit_tasks;
        g_module.name = "software";
        for (i = 0; i < ACCEL_OPC_LAST; i++) {
                g_accel_ch->module_ch[i] = g_module_ch;
                g_modules_opc[i] = &g_module;
        }
        g_sw_ch = (struct sw_accel_io_channel *)((char *)g_module_ch + sizeof(
                          struct spdk_io_channel));
        TAILQ_INIT(&g_sw_ch->tasks_to_complete);
        g_module.supports_opcode = _supports_opcode;
        return 0;
}

static int
test_cleanup(void)
{
        free(g_ch);
        free(g_module_ch);

        return 0;
}

#define DUMMY_ARG 0xDEADBEEF
static bool g_dummy_cb_called = false;
static void
dummy_cb_fn(void *cb_arg, int status)
{
        CU_ASSERT(*(uint32_t *)cb_arg == DUMMY_ARG);
        CU_ASSERT(status == 0);
        g_dummy_cb_called = true;
}

static void
test_spdk_accel_task_complete(void)
{
        struct spdk_accel_task accel_task = {};
        struct spdk_accel_task *expected_accel_task = NULL;
        uint32_t cb_arg = DUMMY_ARG;
        int status = 0;

        accel_task.accel_ch = g_accel_ch;
        accel_task.cb_fn = dummy_cb_fn;
        accel_task.cb_arg = &cb_arg;
        TAILQ_INIT(&g_accel_ch->task_pool);

        /* Confirm cb is called and task added to list. */
        spdk_accel_task_complete(&accel_task, status);
        CU_ASSERT(g_dummy_cb_called == true);
        expected_accel_task = TAILQ_FIRST(&g_accel_ch->task_pool);
        TAILQ_REMOVE(&g_accel_ch->task_pool, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &accel_task);
}

static void
test_get_task(void)
{
        struct spdk_accel_task *task;
        struct spdk_accel_task _task;
        void *cb_arg = NULL;

        TAILQ_INIT(&g_accel_ch->task_pool);

        /* no tasks left, return NULL. */
        task = _get_task(g_accel_ch, dummy_cb_fn, cb_arg);
        CU_ASSERT(task == NULL);

        _task.cb_fn = dummy_cb_fn;
        _task.cb_arg = cb_arg;
        _task.accel_ch = g_accel_ch;
        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &_task, link);

        /* Get a valid task. */
        task = _get_task(g_accel_ch, dummy_cb_fn, cb_arg);
        CU_ASSERT(task == &_task);
        CU_ASSERT(_task.cb_fn == dummy_cb_fn);
        CU_ASSERT(_task.cb_arg == cb_arg);
        CU_ASSERT(_task.accel_ch == g_accel_ch);
}

#define TEST_SUBMIT_SIZE 64
static void
test_spdk_accel_submit_copy(void)
{
        const uint64_t nbytes = TEST_SUBMIT_SIZE;
        uint8_t dst[TEST_SUBMIT_SIZE] = {0};
        uint8_t src[TEST_SUBMIT_SIZE] = {0};
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;
        int flags = 0;

        TAILQ_INIT(&g_accel_ch->task_pool);

        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_copy(g_ch, src, dst, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        task.accel_ch = g_accel_ch;
        task.flags = 1;
        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* submission OK. */
        rc = spdk_accel_submit_copy(g_ch, dst, src, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.dst == dst);
        CU_ASSERT(task.src == src);
        CU_ASSERT(task.op_code == ACCEL_OPC_COPY);
        CU_ASSERT(task.nbytes == nbytes);
        CU_ASSERT(task.flags == 0);
        CU_ASSERT(memcmp(dst, src, TEST_SUBMIT_SIZE) == 0);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);
}

static void
test_spdk_accel_submit_dualcast(void)
{
        void *dst1;
        void *dst2;
        void *src;
        uint32_t align = ALIGN_4K;
        uint64_t nbytes = TEST_SUBMIT_SIZE;
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;
        int flags = 0;

        TAILQ_INIT(&g_accel_ch->task_pool);

        /* Dualcast requires 4K alignment on dst addresses, so use hard-coded addresses
         * to exercise the buffer alignment checks.
         */
        dst1 = (void *)0x5000;
        dst2 = (void *)0x60f0;
        src = calloc(1, TEST_SUBMIT_SIZE);
        SPDK_CU_ASSERT_FATAL(src != NULL);
        memset(src, 0x5A, TEST_SUBMIT_SIZE);

        /* This should fail since dst2 is not 4k aligned */
        rc = spdk_accel_submit_dualcast(g_ch, dst1, dst2, src, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == -EINVAL);

        dst1 = (void *)0x7010;
        dst2 = (void *)0x6000;
        /* This should fail since dst1 is not 4k aligned */
        rc = spdk_accel_submit_dualcast(g_ch, dst1, dst2, src, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == -EINVAL);

        /* Dualcast requires 4K alignment on dst addresses */
        dst1 = (void *)0x7000;
        dst2 = (void *)0x6000;
        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_dualcast(g_ch, dst1, dst2, src, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* Submission OK. Since this exercises the SW path, valid (not hard-coded)
         * destination buffers are required. */
        dst1 = spdk_dma_zmalloc(nbytes, align, NULL);
        SPDK_CU_ASSERT_FATAL(dst1 != NULL);
        dst2 = spdk_dma_zmalloc(nbytes, align, NULL);
        SPDK_CU_ASSERT_FATAL(dst2 != NULL);
        /* SW module does the dualcast. */
        rc = spdk_accel_submit_dualcast(g_ch, dst1, dst2, src, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.dst == dst1);
        CU_ASSERT(task.dst2 == dst2);
        CU_ASSERT(task.src == src);
        CU_ASSERT(task.op_code == ACCEL_OPC_DUALCAST);
        CU_ASSERT(task.nbytes == nbytes);
        CU_ASSERT(task.flags == 0);
        CU_ASSERT(memcmp(dst1, src, TEST_SUBMIT_SIZE) == 0);
        CU_ASSERT(memcmp(dst2, src, TEST_SUBMIT_SIZE) == 0);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);

        free(src);
        spdk_free(dst1);
        spdk_free(dst2);
}

static void
test_spdk_accel_submit_compare(void)
{
        void *src1;
        void *src2;
        uint64_t nbytes = TEST_SUBMIT_SIZE;
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;

        TAILQ_INIT(&g_accel_ch->task_pool);

        src1 = calloc(1, TEST_SUBMIT_SIZE);
        SPDK_CU_ASSERT_FATAL(src1 != NULL);
        src2 = calloc(1, TEST_SUBMIT_SIZE);
        SPDK_CU_ASSERT_FATAL(src2 != NULL);

        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_compare(g_ch, src1, src2, nbytes, NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* accel submission OK. */
        rc = spdk_accel_submit_compare(g_ch, src1, src2, nbytes, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.src == src1);
        CU_ASSERT(task.src2 == src2);
        CU_ASSERT(task.op_code == ACCEL_OPC_COMPARE);
        CU_ASSERT(task.nbytes == nbytes);
        CU_ASSERT(memcmp(src1, src2, TEST_SUBMIT_SIZE) == 0);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);

        free(src1);
        free(src2);
}

static void
test_spdk_accel_submit_fill(void)
{
        void *dst;
        void *src;
        uint8_t fill = 0xf;
        uint64_t fill64;
        uint64_t nbytes = TEST_SUBMIT_SIZE;
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;
        int flags = 0;

        TAILQ_INIT(&g_accel_ch->task_pool);

        dst = calloc(1, TEST_SUBMIT_SIZE);
        SPDK_CU_ASSERT_FATAL(dst != NULL);
        src = calloc(1, TEST_SUBMIT_SIZE);
        SPDK_CU_ASSERT_FATAL(src != NULL);
        memset(src, fill, TEST_SUBMIT_SIZE);
        memset(&fill64, fill, sizeof(uint64_t));

        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_fill(g_ch, dst, fill, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* accel submission OK. */
        rc = spdk_accel_submit_fill(g_ch, dst, fill, nbytes, flags, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.dst == dst);
        CU_ASSERT(task.fill_pattern == fill64);
        CU_ASSERT(task.op_code == ACCEL_OPC_FILL);
        CU_ASSERT(task.nbytes == nbytes);
        CU_ASSERT(task.flags == 0);

        CU_ASSERT(memcmp(dst, src, TEST_SUBMIT_SIZE) == 0);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);

        free(dst);
        free(src);
}

static void
test_spdk_accel_submit_crc32c(void)
{
        const uint64_t nbytes = TEST_SUBMIT_SIZE;
        uint32_t crc_dst;
        uint8_t src[TEST_SUBMIT_SIZE];
        uint32_t seed = 1;
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;

        TAILQ_INIT(&g_accel_ch->task_pool);

        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_crc32c(g_ch, &crc_dst, src, seed, nbytes, NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* accel submission OK. */
        rc = spdk_accel_submit_crc32c(g_ch, &crc_dst, src, seed, nbytes, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.crc_dst == &crc_dst);
        CU_ASSERT(task.src == src);
        CU_ASSERT(task.s.iovcnt == 0);
        CU_ASSERT(task.seed == seed);
        CU_ASSERT(task.op_code == ACCEL_OPC_CRC32C);
        CU_ASSERT(task.nbytes == nbytes);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);
}

static void
test_spdk_accel_submit_crc32cv(void)
{
        uint32_t crc_dst;
        uint32_t seed = 0;
        uint32_t iov_cnt = 32;
        void *cb_arg = NULL;
        int rc;
        uint32_t i = 0;
        struct spdk_accel_task task;
        struct iovec iov[32];
        struct spdk_accel_task *expected_accel_task = NULL;

        TAILQ_INIT(&g_accel_ch->task_pool);

        for (i = 0; i < iov_cnt; i++) {
                iov[i].iov_base = calloc(1, TEST_SUBMIT_SIZE);
                SPDK_CU_ASSERT_FATAL(iov[i].iov_base != NULL);
                iov[i].iov_len = TEST_SUBMIT_SIZE;
        }

        task.nbytes = TEST_SUBMIT_SIZE;
        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* accel submission OK. */
        rc = spdk_accel_submit_crc32cv(g_ch, &crc_dst, iov, iov_cnt, seed, NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.s.iovs == iov);
        CU_ASSERT(task.s.iovcnt == iov_cnt);
        CU_ASSERT(task.crc_dst == &crc_dst);
        CU_ASSERT(task.seed == seed);
        CU_ASSERT(task.op_code == ACCEL_OPC_CRC32C);
        CU_ASSERT(task.cb_arg == cb_arg);
        CU_ASSERT(task.nbytes == iov[0].iov_len);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);

        for (i = 0; i < iov_cnt; i++) {
                free(iov[i].iov_base);
        }
}

static void
test_spdk_accel_submit_copy_crc32c(void)
{
        const uint64_t nbytes = TEST_SUBMIT_SIZE;
        uint32_t crc_dst;
        uint8_t dst[TEST_SUBMIT_SIZE];
        uint8_t src[TEST_SUBMIT_SIZE];
        uint32_t seed = 0;
        void *cb_arg = NULL;
        int rc;
        struct spdk_accel_task task;
        struct spdk_accel_task *expected_accel_task = NULL;
        int flags = 0;

        TAILQ_INIT(&g_accel_ch->task_pool);

        /* Fail with no tasks on _get_task() */
        rc = spdk_accel_submit_copy_crc32c(g_ch, dst, src, &crc_dst, seed, nbytes, flags,
                                           NULL, cb_arg);
        CU_ASSERT(rc == -ENOMEM);

        TAILQ_INSERT_TAIL(&g_accel_ch->task_pool, &task, link);

        /* accel submission OK. */
        rc = spdk_accel_submit_copy_crc32c(g_ch, dst, src, &crc_dst, seed, nbytes, flags,
                                           NULL, cb_arg);
        CU_ASSERT(rc == 0);
        CU_ASSERT(task.dst == dst);
        CU_ASSERT(task.src == src);
        CU_ASSERT(task.crc_dst == &crc_dst);
        CU_ASSERT(task.s.iovcnt == 0);
        CU_ASSERT(task.seed == seed);
        CU_ASSERT(task.nbytes == nbytes);
        CU_ASSERT(task.flags == 0);
        CU_ASSERT(task.op_code == ACCEL_OPC_COPY_CRC32C);
        expected_accel_task = TAILQ_FIRST(&g_sw_ch->tasks_to_complete);
        TAILQ_REMOVE(&g_sw_ch->tasks_to_complete, expected_accel_task, link);
        CU_ASSERT(expected_accel_task == &task);
}

static void
test_spdk_accel_module_find_by_name(void)
{
        struct spdk_accel_module_if mod1 = {};
        struct spdk_accel_module_if mod2 = {};
        struct spdk_accel_module_if mod3 = {};
        struct spdk_accel_module_if *accel_module = NULL;

        mod1.name = "ioat";
        mod2.name = "idxd";
        mod3.name = "software";

        TAILQ_INIT(&spdk_accel_module_list);
        TAILQ_INSERT_TAIL(&spdk_accel_module_list, &mod1, tailq);
        TAILQ_INSERT_TAIL(&spdk_accel_module_list, &mod2, tailq);
        TAILQ_INSERT_TAIL(&spdk_accel_module_list, &mod3, tailq);

        /* Now let's find a valid module */
        accel_module = _module_find_by_name("ioat");
        CU_ASSERT(accel_module != NULL);

        /* Try to find one that doesn't exist */
        accel_module = _module_find_by_name("XXX");
        CU_ASSERT(accel_module == NULL);
}

static void
test_spdk_accel_module_register(void)
{
        struct spdk_accel_module_if mod1 = {};
        struct spdk_accel_module_if mod2 = {};
        struct spdk_accel_module_if mod3 = {};
        struct spdk_accel_module_if mod4 = {};
        struct spdk_accel_module_if *accel_module = NULL;
        int i = 0;

        mod1.name = "ioat";
        mod2.name = "idxd";
        mod3.name = "software";
        mod4.name = "nothing";

        TAILQ_INIT(&spdk_accel_module_list);

        spdk_accel_module_list_add(&mod1);
        spdk_accel_module_list_add(&mod2);
        spdk_accel_module_list_add(&mod3);
        spdk_accel_module_list_add(&mod4);

        /* Now confirm they're in the right order. */
        TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
                switch (i++) {
                case 0:
                        CU_ASSERT(strcmp(accel_module->name, "software") == 0);
                        break;
                case 1:
                        CU_ASSERT(strcmp(accel_module->name, "ioat") == 0);
                        break;
                case 2:
                        CU_ASSERT(strcmp(accel_module->name, "idxd") == 0);
                        break;
                case 3:
                        CU_ASSERT(strcmp(accel_module->name, "nothing") == 0);
                        break;
                default:
                        CU_ASSERT(false);
                        break;
                }
        }
        CU_ASSERT(i == 4);
}

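/* Helpers for the sequence tests: ut_sequence_step_cb counts per-operation callbacks and
 * ut_sequence_complete_cb records the status of the sequence as a whole. */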
struct ut_sequence {
        bool complete;
        int status;
};

static void
ut_sequence_step_cb(void *cb_arg)
{
        int *completed = cb_arg;

        (*completed)++;
}

static void
ut_sequence_complete_cb(void *cb_arg, int status)
{
        struct ut_sequence *seq = cb_arg;

        seq->complete = true;
        seq->status = status;
}

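/* Exercise fill and copy appends in various orders and verify the final buffer contents
 * once the whole sequence is finished. */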
static void
test_sequence_fill_copy(void)
{
        struct spdk_accel_sequence *seq = NULL;
        struct spdk_io_channel *ioch;
        struct ut_sequence ut_seq;
        char buf[4096], tmp[2][4096], expected[4096];
        struct iovec src_iovs[2], dst_iovs[2];
        int rc, completed;

        ioch = spdk_accel_get_io_channel();
        SPDK_CU_ASSERT_FATAL(ioch != NULL);

        /* First check the simplest case - single task in a sequence */
        memset(buf, 0, sizeof(buf));
        memset(expected, 0xa5, sizeof(expected));
        completed = 0;
        rc = spdk_accel_append_fill(&seq, ioch, buf, sizeof(buf), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);
        CU_ASSERT_EQUAL(completed, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 1);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check a single copy operation */
        memset(buf, 0, sizeof(buf));
        memset(tmp[0], 0xa5, sizeof(tmp[0]));
        memset(expected, 0xa5, sizeof(expected));
        completed = 0;
        seq = NULL;

        dst_iovs[0].iov_base = buf;
        dst_iovs[0].iov_len = sizeof(buf);
        src_iovs[0].iov_base = tmp[0];
        src_iovs[0].iov_len = sizeof(tmp[0]);

        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
                                    &src_iovs[0], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 1);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check multiple fill operations */
        memset(buf, 0, sizeof(buf));
        memset(expected, 0xfe, 4096);
        memset(expected, 0xde, 2048);
        memset(expected, 0xa5, 1024);
        seq = NULL;
        completed = 0;
        rc = spdk_accel_append_fill(&seq, ioch, buf, 4096, NULL, NULL, 0xfe, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);
        rc = spdk_accel_append_fill(&seq, ioch, buf, 2048, NULL, NULL, 0xde, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);
        rc = spdk_accel_append_fill(&seq, ioch, buf, 1024, NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 3);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check multiple copy operations */
        memset(buf, 0, sizeof(buf));
        memset(tmp[0], 0, sizeof(tmp[0]));
        memset(tmp[1], 0, sizeof(tmp[1]));
        memset(expected, 0xa5, sizeof(expected));
        seq = NULL;
        completed = 0;

        rc = spdk_accel_append_fill(&seq, ioch, tmp[0], sizeof(tmp[0]), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs[0].iov_base = tmp[1];
        dst_iovs[0].iov_len = sizeof(tmp[1]);
        src_iovs[0].iov_base = tmp[0];
        src_iovs[0].iov_len = sizeof(tmp[0]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
                                    &src_iovs[0], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs[1].iov_base = buf;
        dst_iovs[1].iov_len = sizeof(buf);
        src_iovs[1].iov_base = tmp[1];
        src_iovs[1].iov_len = sizeof(tmp[1]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
                                    &src_iovs[1], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 3);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check that adding a copy operation at the end will change destination buffer */
        memset(buf, 0, sizeof(buf));
        memset(tmp[0], 0, sizeof(tmp[0]));
        memset(expected, 0xa5, sizeof(buf));
        seq = NULL;
        completed = 0;
        rc = spdk_accel_append_fill(&seq, ioch, tmp[0], sizeof(tmp[0]), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs[0].iov_base = buf;
        dst_iovs[0].iov_len = sizeof(buf);
        src_iovs[0].iov_base = tmp[0];
        src_iovs[0].iov_len = sizeof(tmp[0]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
                                    &src_iovs[0], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 2);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check that it's also possible to add copy operation at the beginning */
        memset(buf, 0, sizeof(buf));
        memset(tmp[0], 0xde, sizeof(tmp[0]));
        memset(tmp[1], 0, sizeof(tmp[1]));
        memset(expected, 0xa5, sizeof(expected));
        seq = NULL;
        completed = 0;

        dst_iovs[0].iov_base = tmp[1];
        dst_iovs[0].iov_len = sizeof(tmp[1]);
        src_iovs[0].iov_base = tmp[0];
        src_iovs[0].iov_len = sizeof(tmp[0]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
                                    &src_iovs[0], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        rc = spdk_accel_append_fill(&seq, ioch, tmp[1], sizeof(tmp[1]), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs[1].iov_base = buf;
        dst_iovs[1].iov_len = sizeof(buf);
        src_iovs[1].iov_base = tmp[1];
        src_iovs[1].iov_len = sizeof(tmp[1]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
                                    &src_iovs[1], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 3);
        CU_ASSERT(ut_seq.complete);
        CU_ASSERT_EQUAL(ut_seq.status, 0);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        spdk_put_io_channel(ioch);
        poll_threads();
}

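/* Aborting a sequence must invoke each queued operation's callback without executing the
 * operations themselves. */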
static void
test_sequence_abort(void)
{
        struct spdk_accel_sequence *seq = NULL;
        struct spdk_io_channel *ioch;
        char buf[4096], tmp[2][4096], expected[4096];
        struct iovec src_iovs[2], dst_iovs[2];
        int rc, completed;

        ioch = spdk_accel_get_io_channel();
        SPDK_CU_ASSERT_FATAL(ioch != NULL);

        /* Check that aborting a sequence calls operation's callback, the operation is not executed
         * and the sequence is freed
         */
        memset(buf, 0, sizeof(buf));
        memset(expected, 0, sizeof(buf));
        completed = 0;
        seq = NULL;
        rc = spdk_accel_append_fill(&seq, ioch, buf, sizeof(buf), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        spdk_accel_sequence_abort(seq);
        CU_ASSERT_EQUAL(completed, 1);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* Check sequence with multiple operations */
        memset(buf, 0, sizeof(buf));
        memset(expected, 0, sizeof(buf));
        completed = 0;
        seq = NULL;

        dst_iovs[0].iov_base = tmp[1];
        dst_iovs[0].iov_len = sizeof(tmp[1]);
        src_iovs[0].iov_base = tmp[0];
        src_iovs[0].iov_len = sizeof(tmp[0]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
                                    &src_iovs[0], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        rc = spdk_accel_append_fill(&seq, ioch, tmp[1], 4096, NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        rc = spdk_accel_append_fill(&seq, ioch, tmp[1], 2048, NULL, NULL, 0xde, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs[1].iov_base = buf;
        dst_iovs[1].iov_len = sizeof(buf);
        src_iovs[1].iov_base = tmp[1];
        src_iovs[1].iov_len = sizeof(tmp[1]);
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
                                    &src_iovs[1], 1, NULL, NULL, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        spdk_accel_sequence_abort(seq);
        CU_ASSERT_EQUAL(completed, 4);
        CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

        /* This should be a no-op */
        spdk_accel_sequence_abort(NULL);

        spdk_put_io_channel(ioch);
        poll_threads();
}

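/* Drain the channel's task and sequence pools and confirm that spdk_accel_append_*() fails
 * with -ENOMEM without leaving behind a partially built sequence. */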
static void
test_sequence_append_error(void)
{
        struct spdk_accel_sequence *seq = NULL;
        struct spdk_io_channel *ioch;
        struct accel_io_channel *accel_ch;
        struct iovec src_iovs, dst_iovs;
        char buf[4096];
        TAILQ_HEAD(, spdk_accel_task) tasks = TAILQ_HEAD_INITIALIZER(tasks);
        TAILQ_HEAD(, spdk_accel_sequence) seqs = TAILQ_HEAD_INITIALIZER(seqs);
        int rc;

        ioch = spdk_accel_get_io_channel();
        SPDK_CU_ASSERT_FATAL(ioch != NULL);
        accel_ch = spdk_io_channel_get_ctx(ioch);

        /* Check that append fails and no sequence object is allocated when there are no more free
         * tasks */
        TAILQ_SWAP(&tasks, &accel_ch->task_pool, spdk_accel_task, link);

        rc = spdk_accel_append_fill(&seq, ioch, buf, sizeof(buf), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = 2048;
        src_iovs.iov_base = &buf[2048];
        src_iovs.iov_len = 2048;
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                    &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = 2048;
        src_iovs.iov_base = &buf[2048];
        src_iovs.iov_len = 2048;
        rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                          &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        /* Check that the same happens when the sequence queue is empty */
        TAILQ_SWAP(&tasks, &accel_ch->task_pool, spdk_accel_task, link);
        TAILQ_SWAP(&seqs, &accel_ch->seq_pool, spdk_accel_sequence, link);

        rc = spdk_accel_append_fill(&seq, ioch, buf, sizeof(buf), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = 2048;
        src_iovs.iov_base = &buf[2048];
        src_iovs.iov_len = 2048;
        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                    &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = 2048;
        src_iovs.iov_base = &buf[2048];
        src_iovs.iov_len = 2048;
        rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                          &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, NULL);
        CU_ASSERT_EQUAL(rc, -ENOMEM);
        CU_ASSERT_PTR_NULL(seq);

        TAILQ_SWAP(&tasks, &accel_ch->task_pool, spdk_accel_task, link);

        spdk_put_io_channel(ioch);
        poll_threads();
}

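/* Per-opcode knobs used by the stubbed submit_tasks callback below: tests can force a
 * submission or completion status and verify the iovecs a task was submitted with. */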
struct ut_sequence_operation {
        int complete_status;
        int submit_status;
        int count;
        struct iovec *src_iovs;
        uint32_t src_iovcnt;
        struct iovec *dst_iovs;
        uint32_t dst_iovcnt;
};

static struct ut_sequence_operation g_seq_operations[ACCEL_OPC_LAST];

static int
ut_sequnce_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
        struct ut_sequence_operation *op = &g_seq_operations[task->op_code];

        if (op->src_iovs != NULL) {
                CU_ASSERT_EQUAL(task->s.iovcnt, op->src_iovcnt);
                CU_ASSERT_EQUAL(memcmp(task->s.iovs, op->src_iovs,
                                       sizeof(struct iovec) * op->src_iovcnt), 0);
        }
        if (op->dst_iovs != NULL) {
                CU_ASSERT_EQUAL(task->d.iovcnt, op->dst_iovcnt);
                CU_ASSERT_EQUAL(memcmp(task->d.iovs, op->dst_iovs,
                                       sizeof(struct iovec) * op->dst_iovcnt), 0);
        }

        op->count++;
        if (op->submit_status != 0) {
                return op->submit_status;
        }

        spdk_accel_task_complete(task, op->complete_status);

        return 0;
}

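/* Verify that an error reported by any operation in a sequence (at submission or at
 * completion) is propagated as the status of the whole sequence, while every per-operation
 * callback still fires. */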
static void
test_sequence_completion_error(void)
{
        struct spdk_accel_sequence *seq = NULL;
        struct spdk_io_channel *ioch;
        struct ut_sequence ut_seq;
        struct iovec src_iovs, dst_iovs;
        char buf[4096], tmp[4096];
        struct spdk_accel_module_if *modules[ACCEL_OPC_LAST];
        int i, rc, completed;

        ioch = spdk_accel_get_io_channel();
        SPDK_CU_ASSERT_FATAL(ioch != NULL);

        /* Override the submit_tasks function */
        g_module.submit_tasks = ut_sequnce_submit_tasks;
        for (i = 0; i < ACCEL_OPC_LAST; ++i) {
                modules[i] = g_modules_opc[i];
                g_modules_opc[i] = &g_module;
        }

        memset(buf, 0, sizeof(buf));
        memset(tmp, 0, sizeof(tmp));

        /* Check that if the first operation completes with an error, the whole sequence is
         * completed with that error and that all operations' completion callbacks are executed
         */
        g_seq_operations[ACCEL_OPC_FILL].complete_status = -E2BIG;
        completed = 0;
        seq = NULL;
        rc = spdk_accel_append_fill(&seq, ioch, tmp, sizeof(tmp), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = sizeof(buf);
        src_iovs.iov_base = tmp;
        src_iovs.iov_len = sizeof(tmp);

        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                    &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 2);
        CU_ASSERT_EQUAL(ut_seq.status, -E2BIG);

        /* Check the same with a second operation in the sequence */
        g_seq_operations[ACCEL_OPC_COPY].complete_status = -EACCES;
        g_seq_operations[ACCEL_OPC_FILL].complete_status = 0;
        completed = 0;
        seq = NULL;
        rc = spdk_accel_append_fill(&seq, ioch, tmp, sizeof(tmp), NULL, NULL, 0xa5, 0,
                                    ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        dst_iovs.iov_base = buf;
        dst_iovs.iov_len = sizeof(buf);
        src_iovs.iov_base = tmp;
        src_iovs.iov_len = sizeof(tmp);

        rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
                                    &src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, &completed);
        CU_ASSERT_EQUAL(rc, 0);

        ut_seq.complete = false;
        rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
        CU_ASSERT_EQUAL(rc, 0);

        poll_threads();
        CU_ASSERT_EQUAL(completed, 2);
        CU_ASSERT_EQUAL(ut_seq.status, -EACCES);
g_seq_operations[ACCEL_OPC_COPY].complete_status = 0;
|
|
|
|
g_seq_operations[ACCEL_OPC_FILL].complete_status = 0;
|
|
|
|
|
|
|
|
/* Check submission failure of the first operation */
|
|
|
|
g_seq_operations[ACCEL_OPC_FILL].submit_status = -EADDRINUSE;
|
|
|
|
completed = 0;
|
|
|
|
seq = NULL;
|
|
|
|
rc = spdk_accel_append_fill(&seq, ioch, tmp, sizeof(tmp), NULL, NULL, 0xa5, 0,
|
|
|
|
ut_sequence_step_cb, &completed);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
dst_iovs.iov_base = buf;
|
|
|
|
dst_iovs.iov_len = sizeof(buf);
|
|
|
|
src_iovs.iov_base = tmp;
|
|
|
|
src_iovs.iov_len = sizeof(tmp);
|
|
|
|
|
|
|
|
rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
|
|
|
|
&src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, &completed);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
ut_seq.complete = false;
|
|
|
|
rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
poll_threads();
|
|
|
|
CU_ASSERT_EQUAL(completed, 2);
|
|
|
|
CU_ASSERT_EQUAL(ut_seq.status, -EADDRINUSE);
|
|
|
|
|
|
|
|
/* Check the same with a second operation */
|
|
|
|
g_seq_operations[ACCEL_OPC_COPY].submit_status = -EADDRNOTAVAIL;
|
|
|
|
g_seq_operations[ACCEL_OPC_FILL].submit_status = 0;
|
|
|
|
completed = 0;
|
|
|
|
seq = NULL;
|
|
|
|
rc = spdk_accel_append_fill(&seq, ioch, tmp, sizeof(tmp), NULL, NULL, 0xa5, 0,
|
|
|
|
ut_sequence_step_cb, &completed);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
dst_iovs.iov_base = buf;
|
|
|
|
dst_iovs.iov_len = sizeof(buf);
|
|
|
|
src_iovs.iov_base = tmp;
|
|
|
|
src_iovs.iov_len = sizeof(tmp);
|
|
|
|
|
|
|
|
rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs, 1, NULL, NULL,
|
|
|
|
&src_iovs, 1, NULL, NULL, 0, ut_sequence_step_cb, &completed);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
ut_seq.complete = false;
|
|
|
|
rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
|
|
|
|
|
|
|
poll_threads();
|
|
|
|
CU_ASSERT_EQUAL(completed, 2);
|
|
|
|
CU_ASSERT_EQUAL(ut_seq.status, -EADDRNOTAVAIL);
|
|
|
|
|
|
|
|
/* Cleanup module pointers to make subsequent tests work correctly */
|
|
|
|
for (i = 0; i < ACCEL_OPC_LAST; ++i) {
|
|
|
|
g_modules_opc[i] = modules[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_put_io_channel(ioch);
|
|
|
|
poll_threads();
|
|
|
|
}
#ifdef SPDK_CONFIG_ISAL
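/* Completion callback for the spdk_accel_submit_compress() calls used to prepare compressed
 * test data for the decompress/reverse tests below. */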
static void
ut_compress_cb(void *cb_arg, int status)
{
	int *completed = cb_arg;

	CU_ASSERT_EQUAL(status, 0);

	*completed = 1;
}
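
/* Verifies decompress operations inside a sequence: a single decompress on its own, a decompress
 * in the middle of a sequence (copy -> decompress -> fill), and a decompress at the beginning
 * (decompress -> copy). */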
static void
test_sequence_decompress(void)
{
	struct spdk_accel_sequence *seq = NULL;
	struct spdk_io_channel *ioch;
	struct ut_sequence ut_seq;
	char buf[4096], tmp[2][4096], expected[4096];
	struct iovec src_iovs[2], dst_iovs[2];
	uint32_t compressed_size;
	int rc, completed = 0;

	ioch = spdk_accel_get_io_channel();
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
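
	/* Compress the expected pattern first, so the decompress steps below have real
	 * compressed data to work with. */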
	memset(expected, 0xa5, sizeof(expected));
	src_iovs[0].iov_base = expected;
	src_iovs[0].iov_len = sizeof(expected);
	rc = spdk_accel_submit_compress(ioch, tmp[0], sizeof(tmp[0]), &src_iovs[0], 1,
					&compressed_size, 0, ut_compress_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	while (!completed) {
		poll_threads();
	}

	/* Check a single decompress operation in a sequence */
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = buf;
	dst_iovs[0].iov_len = sizeof(buf);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 1);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	/* Put the decompress operation in the middle of a sequence with a copy operation at the
	 * beginning and a fill at the end modifying the first 2048B of the buffer.
	 */
	memset(expected, 0xfe, 2048);
	memset(buf, 0, sizeof(buf));
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = compressed_size;
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
				    &src_iovs[0], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = sizeof(buf);
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
					  &src_iovs[1], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_accel_append_fill(&seq, ioch, buf, 2048, NULL, NULL, 0xfe, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 3);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	/* Check sequence with decompress at the beginning: decompress -> copy */
	memset(expected, 0xa5, sizeof(expected));
	memset(buf, 0, sizeof(buf));
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = sizeof(tmp[1]);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = sizeof(buf);
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = sizeof(tmp[1]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 2);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	spdk_put_io_channel(ioch);
	poll_threads();
}
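
/* Exercises spdk_accel_sequence_reverse(): reversing a single-operation sequence, sequences
 * appended in reverse order, and reversing a sequence twice to restore the original order. */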
static void
test_sequence_reverse(void)
{
	struct spdk_accel_sequence *seq = NULL;
	struct spdk_io_channel *ioch;
	struct ut_sequence ut_seq;
	char buf[4096], tmp[2][4096], expected[4096];
	struct iovec src_iovs[2], dst_iovs[2];
	uint32_t compressed_size;
	int rc, completed = 0;

	ioch = spdk_accel_get_io_channel();
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	memset(expected, 0xa5, sizeof(expected));
	src_iovs[0].iov_base = expected;
	src_iovs[0].iov_len = sizeof(expected);
	rc = spdk_accel_submit_compress(ioch, tmp[0], sizeof(tmp[0]), &src_iovs[0], 1,
					&compressed_size, 0, ut_compress_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	while (!completed) {
		poll_threads();
	}

	/* First check that reversing a sequence with a single operation is a no-op */
	memset(buf, 0, sizeof(buf));
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = buf;
	dst_iovs[0].iov_len = sizeof(buf);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	spdk_accel_sequence_reverse(seq);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 1);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	/* Add a copy operation at the end with src set to the compressed data. After reverse(),
	 * that copy operation should be first, so decompress() should receive compressed data in
	 * its src buffer.
	 */
	memset(buf, 0, sizeof(buf));
	memset(tmp[1], 0, sizeof(tmp[1]));
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = buf;
	dst_iovs[0].iov_len = sizeof(buf);
	src_iovs[0].iov_base = tmp[1];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = tmp[1];
	dst_iovs[1].iov_len = compressed_size;
	src_iovs[1].iov_base = tmp[0];
	src_iovs[1].iov_len = compressed_size;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	spdk_accel_sequence_reverse(seq);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 2);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	/* Check the same, but add an extra fill operation at the beginning that should execute last
	 * after reverse().
	 */
	memset(buf, 0, sizeof(buf));
	memset(tmp[1], 0, sizeof(tmp[1]));
	memset(expected, 0xfe, 2048);
	seq = NULL;
	completed = 0;

	rc = spdk_accel_append_fill(&seq, ioch, buf, 2048, NULL, NULL, 0xfe, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[0].iov_base = buf;
	dst_iovs[0].iov_len = sizeof(buf);
	src_iovs[0].iov_base = tmp[1];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = tmp[1];
	dst_iovs[1].iov_len = compressed_size;
	src_iovs[1].iov_base = tmp[0];
	src_iovs[1].iov_len = compressed_size;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	spdk_accel_sequence_reverse(seq);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 3);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	/* Build the sequence in order and then reverse it twice */
	memset(buf, 0, sizeof(buf));
	memset(tmp[1], 0, sizeof(tmp[1]));
	seq = NULL;
	completed = 0;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = compressed_size;
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = compressed_size;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
				    &src_iovs[0], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = sizeof(buf);
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = compressed_size;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
					  &src_iovs[1], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_accel_append_fill(&seq, ioch, buf, 2048, NULL, NULL, 0xfe, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	spdk_accel_sequence_reverse(seq);
	spdk_accel_sequence_reverse(seq);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 3);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(memcmp(buf, expected, sizeof(buf)), 0);

	spdk_put_io_channel(ioch);
	poll_threads();
}
#endif
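
/* Verifies that copy operations that can be folded into an adjacent operation's buffers are
 * elided: the copy step callbacks still run, but no copy is submitted to a module. */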
static void
test_sequence_copy_elision(void)
{
	struct spdk_accel_sequence *seq = NULL;
	struct spdk_io_channel *ioch;
	struct ut_sequence ut_seq;
	struct iovec src_iovs[4], dst_iovs[4], exp_iovs[2];
	char buf[4096], tmp[4][4096];
	struct spdk_accel_module_if *modules[ACCEL_OPC_LAST];
	int i, rc, completed;

	ioch = spdk_accel_get_io_channel();
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	/* Override the submit_tasks function */
	g_module.submit_tasks = ut_sequnce_submit_tasks;
	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
		g_seq_operations[i].complete_status = 0;
		g_seq_operations[i].submit_status = 0;
		g_seq_operations[i].count = 0;

		modules[i] = g_modules_opc[i];
		g_modules_opc[i] = &g_module;
	}

	/* Check that a copy operation at the beginning is removed */
	seq = NULL;
	completed = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovcnt = 1;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovcnt = 1;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovs = &exp_iovs[0];
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovs = &exp_iovs[1];
	exp_iovs[0].iov_base = tmp[0];
	exp_iovs[0].iov_len = sizeof(tmp[0]);
	exp_iovs[1].iov_base = buf;
	exp_iovs[1].iov_len = 2048;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = sizeof(tmp[1]);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = sizeof(tmp[0]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
				    &src_iovs[0], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = 2048;
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = sizeof(tmp[1]);
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
					  &src_iovs[1], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 2);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_COPY].count, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_DECOMPRESS].count, 1);

	/* Check that a copy operation at the end is removed too */
	seq = NULL;
	completed = 0;
	g_seq_operations[ACCEL_OPC_COPY].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovs = &exp_iovs[0];
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovs = &exp_iovs[1];
	exp_iovs[0].iov_base = tmp[0];
	exp_iovs[0].iov_len = sizeof(tmp[0]);
	exp_iovs[1].iov_base = buf;
	exp_iovs[1].iov_len = 2048;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = 2048;
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = sizeof(tmp[0]);
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = 2048;
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = 2048;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 2);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_COPY].count, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_DECOMPRESS].count, 1);

	/* Check a copy operation both at the beginning and the end */
	seq = NULL;
	completed = 0;
	g_seq_operations[ACCEL_OPC_COPY].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovs = &exp_iovs[0];
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovs = &exp_iovs[1];
	exp_iovs[0].iov_base = tmp[0];
	exp_iovs[0].iov_len = sizeof(tmp[0]);
	exp_iovs[1].iov_base = buf;
	exp_iovs[1].iov_len = 2048;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = sizeof(tmp[1]);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = sizeof(tmp[0]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
				    &src_iovs[0], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = tmp[2];
	dst_iovs[1].iov_len = 2048;
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = sizeof(tmp[1]);
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
					  &src_iovs[1], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[2].iov_base = buf;
	dst_iovs[2].iov_len = 2048;
	src_iovs[2].iov_base = tmp[2];
	src_iovs[2].iov_len = 2048;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[2], 1, NULL, NULL,
				    &src_iovs[2], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 3);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_COPY].count, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_DECOMPRESS].count, 1);

	/* Check decompress + copy + decompress + copy */
	seq = NULL;
	completed = 0;
	g_seq_operations[ACCEL_OPC_COPY].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].count = 0;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovs = NULL;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovs = NULL;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = sizeof(tmp[1]);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = sizeof(tmp[0]);
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
					  &src_iovs[0], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = tmp[2];
	dst_iovs[1].iov_len = 2048;
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = sizeof(tmp[1]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[2].iov_base = tmp[3];
	dst_iovs[2].iov_len = 1024;
	src_iovs[2].iov_base = tmp[2];
	src_iovs[2].iov_len = 2048;
	rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[2], 1, NULL, NULL,
					  &src_iovs[2], 1, NULL, NULL, 0,
					  ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[3].iov_base = buf;
	dst_iovs[3].iov_len = 1024;
	src_iovs[3].iov_base = tmp[3];
	src_iovs[3].iov_len = 1024;
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[3], 1, NULL, NULL,
				    &src_iovs[3], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 4);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_COPY].count, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_DECOMPRESS].count, 2);

	/* Check two copy operations - one of them should be removed, while the other should be
	 * executed normally */
	seq = NULL;
	completed = 0;
	g_seq_operations[ACCEL_OPC_COPY].count = 0;

	dst_iovs[0].iov_base = tmp[1];
	dst_iovs[0].iov_len = sizeof(tmp[1]);
	src_iovs[0].iov_base = tmp[0];
	src_iovs[0].iov_len = sizeof(tmp[0]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[0], 1, NULL, NULL,
				    &src_iovs[0], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	dst_iovs[1].iov_base = buf;
	dst_iovs[1].iov_len = sizeof(buf);
	src_iovs[1].iov_base = tmp[1];
	src_iovs[1].iov_len = sizeof(tmp[1]);
	rc = spdk_accel_append_copy(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
				    &src_iovs[1], 1, NULL, NULL, 0,
				    ut_sequence_step_cb, &completed);
	CU_ASSERT_EQUAL(rc, 0);

	ut_seq.complete = false;
	rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
	CU_ASSERT_EQUAL(rc, 0);

	poll_threads();

	CU_ASSERT_EQUAL(completed, 2);
	CU_ASSERT(ut_seq.complete);
	CU_ASSERT_EQUAL(ut_seq.status, 0);
	CU_ASSERT_EQUAL(g_seq_operations[ACCEL_OPC_COPY].count, 1);

	/* Cleanup module pointers to make subsequent tests work correctly */
	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
		g_modules_opc[i] = modules[i];
	}

	g_seq_operations[ACCEL_OPC_DECOMPRESS].src_iovs = NULL;
	g_seq_operations[ACCEL_OPC_DECOMPRESS].dst_iovs = NULL;

	spdk_put_io_channel(ioch);
	poll_threads();
}
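
/* Suite fixture: the sequence tests rely on a fully initialized accel framework and the unit
 * test threading helpers, so both are set up here and torn down in test_sequence_cleanup(). */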
static int
test_sequence_setup(void)
{
	int rc;

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	rc = spdk_accel_initialize();
	if (rc != 0) {
		CU_ASSERT(false);
		return -1;
	}

	return 0;
}

static void
accel_finish_cb(void *cb_arg)
{
	bool *done = cb_arg;

	*done = true;
}
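
/* Suite teardown: waits for spdk_accel_finish() to complete before releasing the test threads. */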
static int
test_sequence_cleanup(void)
{
	bool done = false;

	spdk_accel_finish(accel_finish_cb, &done);

	while (!done) {
		poll_threads();
	}

	free_threads();
	free_cores();

	return 0;
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL, seq_suite;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	/* Sequence tests require accel to be initialized normally, so run them before the other
	 * tests, which register accel modules that aren't fully implemented and would cause accel
	 * initialization to fail.
	 */
	seq_suite = CU_add_suite("accel_sequence", test_sequence_setup, test_sequence_cleanup);
	CU_ADD_TEST(seq_suite, test_sequence_fill_copy);
	CU_ADD_TEST(seq_suite, test_sequence_abort);
	CU_ADD_TEST(seq_suite, test_sequence_append_error);
	CU_ADD_TEST(seq_suite, test_sequence_completion_error);
#ifdef SPDK_CONFIG_ISAL /* accel_sw requires isa-l for compression */
	CU_ADD_TEST(seq_suite, test_sequence_decompress);
	CU_ADD_TEST(seq_suite, test_sequence_reverse);
#endif
	CU_ADD_TEST(seq_suite, test_sequence_copy_elision);

	suite = CU_add_suite("accel", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_spdk_accel_task_complete);
	CU_ADD_TEST(suite, test_get_task);
	CU_ADD_TEST(suite, test_spdk_accel_submit_copy);
	CU_ADD_TEST(suite, test_spdk_accel_submit_dualcast);
	CU_ADD_TEST(suite, test_spdk_accel_submit_compare);
	CU_ADD_TEST(suite, test_spdk_accel_submit_fill);
	CU_ADD_TEST(suite, test_spdk_accel_submit_crc32c);
	CU_ADD_TEST(suite, test_spdk_accel_submit_crc32cv);
	CU_ADD_TEST(suite, test_spdk_accel_submit_copy_crc32c);
	CU_ADD_TEST(suite, test_spdk_accel_module_find_by_name);
	CU_ADD_TEST(suite, test_spdk_accel_module_register);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}