accel: allow modules to report memory domain support

Accel modules can now implement the get_memory_domains() callback to
indicate the types of memory domains they support.  If a module doesn't
implement it, accel assumes that it doesn't support memory domains and
takes care of pulling/pushing data to/from local buffers before passing
a task to the module for execution.

For now, similarly to the bdev layer, we only check if a module supports
memory domains, but we don't verify the types of the domains.  That
could be easily added in the future, if necessary.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Ia513f4f31124672b705b6dd33a2624f0ae94d3ce
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16027
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Konrad Sztyber 2022-12-16 10:47:35 +01:00 committed by Jim Harris
parent a6fef9b194
commit 3de19b0b55
3 changed files with 218 additions and 8 deletions
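
To make the new callback concrete, here is a minimal, illustrative sketch of how a module that owns a single memory domain might implement it.  It is not part of this change; the accel_mymodule_get_memory_domains() name and the g_mymodule_domain variable are hypothetical:

#include "spdk/accel_module.h"
#include "spdk/dma.h"

/* Hypothetical domain, assumed to have been created during module
 * initialization, e.g. with spdk_memory_domain_create(). */
static struct spdk_memory_domain *g_mymodule_domain;

static int
accel_mymodule_get_memory_domains(struct spdk_memory_domain **domains, int num_domains)
{
	/* Only fill the array if the caller provided one.  The return value is
	 * always the number of supported domains, so accel can probe for
	 * support by calling this with (NULL, 0). */
	if (domains != NULL && num_domains > 0) {
		domains[0] = g_mymodule_domain;
	}

	return 1;
}

The module would then point the get_memory_domains member of its struct spdk_accel_module_if at this function; leaving it NULL keeps the old behavior of bouncing data through local buffers.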


@@ -141,6 +141,18 @@ struct spdk_accel_module_if {
int (*crypto_key_init)(struct spdk_accel_crypto_key *key);
void (*crypto_key_deinit)(struct spdk_accel_crypto_key *key);
/**
* Returns memory domains supported by the module. If NULL, the module does not support
* memory domains. The `domains` array can be NULL, in which case this function only
* returns the number of supported memory domains.
*
* \param domains Memory domain array.
* \param num_domains Size of the `domains` array.
*
* \return Number of supported memory domains.
*/
int (*get_memory_domains)(struct spdk_memory_domain **domains, int num_domains);
TAILQ_ENTRY(spdk_accel_module_if) tailq;
};
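
The doc comment above allows the `domains` array to be NULL so that callers can first probe for the number of supported domains.  Accel itself only needs that count, as accel_module_init_opcode() below shows, but purely as an illustrative sketch, a caller that wants the actual domains could use the usual two-call pattern; query_module_domains() is a hypothetical helper:

#include <stdio.h>
#include <stdlib.h>

#include "spdk/accel_module.h"
#include "spdk/dma.h"

static void
query_module_domains(struct spdk_accel_module_if *module_if)
{
	struct spdk_memory_domain **domains;
	int num_domains, i;

	if (module_if->get_memory_domains == NULL) {
		/* Module doesn't support memory domains at all. */
		return;
	}

	/* First call: only ask for the number of supported domains. */
	num_domains = module_if->get_memory_domains(NULL, 0);
	if (num_domains <= 0) {
		return;
	}

	domains = calloc(num_domains, sizeof(*domains));
	if (domains == NULL) {
		return;
	}

	/* Second call: fill the array. */
	num_domains = module_if->get_memory_domains(domains, num_domains);
	for (i = 0; i < num_domains; i++) {
		printf("supported domain: %s\n",
		       spdk_memory_domain_get_dma_device_id(domains[i]));
	}

	free(domains);
}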


@@ -37,6 +37,7 @@
struct accel_module {
struct spdk_accel_module_if *module;
bool supports_memory_domains;
};
/* Largest context size for all accel modules */
@@ -1240,6 +1241,7 @@ accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *ta
assert(task->bounce.s.orig_iovs != NULL);
assert(task->bounce.s.orig_domain != NULL);
assert(task->bounce.s.orig_domain != g_accel_domain);
assert(!g_modules_opc[task->op_code].supports_memory_domains);
rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
task->bounce.s.orig_domain_ctx,
@@ -1276,6 +1278,7 @@ accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *ta
assert(task->bounce.d.orig_iovs != NULL);
assert(task->bounce.d.orig_domain != NULL);
assert(task->bounce.d.orig_domain != g_accel_domain);
assert(!g_modules_opc[task->op_code].supports_memory_domains);
rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
task->bounce.d.orig_domain_ctx,
@@ -1321,8 +1324,13 @@ accel_process_sequence(struct spdk_accel_sequence *seq)
accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
/* Fall through */
case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
/* If a module supports memory domains, we don't need to allocate bounce
* buffers */
if (g_modules_opc[task->op_code].supports_memory_domains) {
accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
break;
}
accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
/* For now, assume that none of the modules support memory domains */
rc = accel_sequence_check_bouncebuf(seq, task);
if (rc != 0) {
/* We couldn't allocate a buffer, wait until one is available */
@@ -1342,9 +1350,6 @@ accel_process_sequence(struct spdk_accel_sequence *seq)
SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
g_opcode_strings[task->op_code], seq);
assert(task->src_domain == NULL);
assert(task->dst_domain == NULL);
module = g_modules_opc[task->op_code].module;
module_ch = accel_ch->module_ch[task->op_code];
@@ -1908,6 +1913,17 @@ accel_module_initialize(void)
}
}
static void
accel_module_init_opcode(enum accel_opcode opcode)
{
struct accel_module *module = &g_modules_opc[opcode];
struct spdk_accel_module_if *module_if = module->module;
if (module_if->get_memory_domains != NULL) {
module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
}
}
int
spdk_accel_initialize(void)
{
@@ -1962,14 +1978,15 @@ spdk_accel_initialize(void)
if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
return -EINVAL;
rc = -EINVAL;
goto error;
}
#ifdef DEBUG
for (op = 0; op < ACCEL_OPC_LAST; op++) {
assert(g_modules_opc[op].module != NULL);
accel_module_init_opcode(op);
}
#endif
rc = spdk_iobuf_register_module("accel");
if (rc != 0) {
SPDK_ERRLOG("Failed to register accel iobuf module\n");


@@ -2280,6 +2280,7 @@ test_sequence_memory_domain(void)
/* Override the submit_tasks function */
g_module_if.submit_tasks = ut_sequnce_submit_tasks;
g_module.supports_memory_domains = false;
for (i = 0; i < ACCEL_OPC_LAST; ++i) {
modules[i] = g_modules_opc[i];
g_modules_opc[i] = g_module;
@@ -2614,6 +2615,185 @@ test_sequence_memory_domain(void)
poll_threads();
}
static int
ut_submit_decompress_memory_domain(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
struct ut_domain_ctx *ctx;
struct iovec *src_iovs, *dst_iovs;
uint32_t src_iovcnt, dst_iovcnt;
src_iovs = task->s.iovs;
dst_iovs = task->d.iovs;
src_iovcnt = task->s.iovcnt;
dst_iovcnt = task->d.iovcnt;
if (task->src_domain != NULL) {
ctx = task->src_domain_ctx;
CU_ASSERT_EQUAL(memcmp(task->s.iovs, &ctx->expected, sizeof(struct iovec)), 0);
src_iovs = &ctx->iov;
src_iovcnt = 1;
}
if (task->dst_domain != NULL) {
ctx = task->dst_domain_ctx;
CU_ASSERT_EQUAL(memcmp(task->d.iovs, &ctx->expected, sizeof(struct iovec)), 0);
dst_iovs = &ctx->iov;
dst_iovcnt = 1;
}
spdk_iovcpy(src_iovs, src_iovcnt, dst_iovs, dst_iovcnt);
spdk_accel_task_complete(task, 0);
return 0;
}
static void
test_sequence_module_memory_domain(void)
{
struct spdk_accel_sequence *seq = NULL;
struct spdk_io_channel *ioch;
struct accel_module modules[ACCEL_OPC_LAST];
struct spdk_memory_domain *accel_domain;
struct ut_sequence ut_seq;
struct ut_domain_ctx domctx[2];
struct iovec src_iovs[2], dst_iovs[2];
void *buf, *accel_domain_ctx;
char srcbuf[4096], dstbuf[4096], tmp[4096], expected[4096];
int i, rc, completed;
ioch = spdk_accel_get_io_channel();
SPDK_CU_ASSERT_FATAL(ioch != NULL);
/* Override the submit_tasks function */
g_module_if.submit_tasks = ut_sequnce_submit_tasks;
g_module.supports_memory_domains = true;
for (i = 0; i < ACCEL_OPC_LAST; ++i) {
modules[i] = g_modules_opc[i];
g_modules_opc[i] = g_module;
}
g_seq_operations[ACCEL_OPC_DECOMPRESS].submit = ut_submit_decompress_memory_domain;
g_seq_operations[ACCEL_OPC_FILL].submit = sw_accel_submit_tasks;
/* Check a sequence with both buffers in memory domains */
memset(srcbuf, 0xa5, sizeof(srcbuf));
memset(expected, 0xa5, sizeof(expected));
memset(dstbuf, 0, sizeof(dstbuf));
seq = NULL;
completed = 0;
src_iovs[0].iov_base = (void *)0xcafebabe;
src_iovs[0].iov_len = sizeof(srcbuf);
dst_iovs[0].iov_base = (void *)0xfeedbeef;
dst_iovs[0].iov_len = sizeof(dstbuf);
ut_domain_ctx_init(&domctx[0], dstbuf, sizeof(dstbuf), &dst_iovs[0]);
ut_domain_ctx_init(&domctx[1], srcbuf, sizeof(srcbuf), &src_iovs[0]);
rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, g_ut_domain, &domctx[0],
&src_iovs[0], 1, g_ut_domain, &domctx[1], 0,
ut_sequence_step_cb, &completed);
CU_ASSERT_EQUAL(rc, 0);
ut_seq.complete = false;
rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
CU_ASSERT_EQUAL(rc, 0);
poll_threads();
CU_ASSERT_EQUAL(completed, 1);
CU_ASSERT(ut_seq.complete);
CU_ASSERT_EQUAL(ut_seq.status, 0);
CU_ASSERT_EQUAL(memcmp(dstbuf, expected, 4096), 0);
/* Check two operations each with a single buffer in memory domain */
memset(srcbuf, 0x5a, sizeof(srcbuf));
memset(expected, 0x5a, sizeof(expected));
memset(dstbuf, 0, sizeof(dstbuf));
memset(tmp, 0, sizeof(tmp));
seq = NULL;
completed = 0;
src_iovs[0].iov_base = srcbuf;
src_iovs[0].iov_len = sizeof(srcbuf);
dst_iovs[0].iov_base = (void *)0xfeedbeef;
dst_iovs[0].iov_len = sizeof(tmp);
ut_domain_ctx_init(&domctx[0], tmp, sizeof(tmp), &dst_iovs[0]);
rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, g_ut_domain, &domctx[0],
&src_iovs[0], 1, NULL, NULL, 0,
ut_sequence_step_cb, &completed);
CU_ASSERT_EQUAL(rc, 0);
src_iovs[1].iov_base = (void *)0xfeedbeef;
src_iovs[1].iov_len = sizeof(tmp);
dst_iovs[1].iov_base = dstbuf;
dst_iovs[1].iov_len = sizeof(dstbuf);
ut_domain_ctx_init(&domctx[1], tmp, sizeof(tmp), &src_iovs[1]);
rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[1], 1, NULL, NULL,
&src_iovs[1], 1, g_ut_domain, &domctx[1], 0,
ut_sequence_step_cb, &completed);
CU_ASSERT_EQUAL(rc, 0);
ut_seq.complete = false;
rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
CU_ASSERT_EQUAL(rc, 0);
poll_threads();
CU_ASSERT_EQUAL(completed, 2);
CU_ASSERT(ut_seq.complete);
CU_ASSERT_EQUAL(ut_seq.status, 0);
CU_ASSERT_EQUAL(memcmp(dstbuf, expected, 4096), 0);
/* Check a sequence with an accel buffer and a buffer in a regular memory domain */
memset(expected, 0xa5, sizeof(expected));
memset(dstbuf, 0, sizeof(dstbuf));
memset(tmp, 0, sizeof(tmp));
seq = NULL;
completed = 0;
rc = spdk_accel_get_buf(ioch, 4096, &buf, &accel_domain, &accel_domain_ctx);
CU_ASSERT_EQUAL(rc, 0);
rc = spdk_accel_append_fill(&seq, ioch, buf, 4096, accel_domain, accel_domain_ctx,
0xa5, 0, ut_sequence_step_cb, &completed);
CU_ASSERT_EQUAL(rc, 0);
src_iovs[0].iov_base = buf;
src_iovs[0].iov_len = 4096;
dst_iovs[0].iov_base = (void *)0xfeedbeef;
dst_iovs[0].iov_len = sizeof(dstbuf);
ut_domain_ctx_init(&domctx[0], dstbuf, sizeof(dstbuf), &dst_iovs[0]);
rc = spdk_accel_append_decompress(&seq, ioch, &dst_iovs[0], 1, g_ut_domain, &domctx[0],
&src_iovs[0], 1, accel_domain, accel_domain_ctx, 0,
ut_sequence_step_cb, &completed);
CU_ASSERT_EQUAL(rc, 0);
ut_seq.complete = false;
rc = spdk_accel_sequence_finish(seq, ut_sequence_complete_cb, &ut_seq);
CU_ASSERT_EQUAL(rc, 0);
poll_threads();
CU_ASSERT_EQUAL(completed, 2);
CU_ASSERT(ut_seq.complete);
CU_ASSERT_EQUAL(ut_seq.status, 0);
CU_ASSERT_EQUAL(memcmp(dstbuf, expected, 4096), 0);
spdk_accel_put_buf(ioch, buf, accel_domain, accel_domain_ctx);
g_module.supports_memory_domains = false;
g_seq_operations[ACCEL_OPC_DECOMPRESS].submit = NULL;
g_seq_operations[ACCEL_OPC_FILL].submit = NULL;
for (i = 0; i < ACCEL_OPC_LAST; ++i) {
g_modules_opc[i] = modules[i];
}
spdk_put_io_channel(ioch);
poll_threads();
}
static int
test_sequence_setup(void)
{
@@ -2694,6 +2874,7 @@ main(int argc, char **argv)
CU_ADD_TEST(seq_suite, test_sequence_copy_elision);
CU_ADD_TEST(seq_suite, test_sequence_accel_buffers);
CU_ADD_TEST(seq_suite, test_sequence_memory_domain);
CU_ADD_TEST(seq_suite, test_sequence_module_memory_domain);
suite = CU_add_suite("accel", test_setup, test_cleanup);
CU_ADD_TEST(suite, test_spdk_accel_task_complete);