bdev: accel sequence support for write requests

It is now possible to submit a write request with a sequence of accel
operations that need to be executed before actually writing the data.
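
For example (a minimal sketch, not part of this change: desc, ch, accel_ch,
the data buffers, offsets and write_done() are placeholder names), a user
can build a sequence on an accel channel (e.g. one obtained with
spdk_accel_get_io_channel()) and attach it to an ext write request through
the new accel_sequence field of struct spdk_bdev_ext_io_opts:

  struct spdk_accel_sequence *seq = NULL;
  struct spdk_bdev_ext_io_opts opts = {};
  struct iovec src = { .iov_base = src_buf, .iov_len = len };
  struct iovec dst = { .iov_base = dst_buf, .iov_len = len };
  int rc;

  /* Append a single copy step (src_buf -> dst_buf) to the sequence. */
  rc = spdk_accel_append_copy(&seq, accel_ch, &dst, 1, NULL, NULL,
                              &src, 1, NULL, NULL, 0, NULL, NULL);
  if (rc != 0) {
          /* handle the error */
  }

  opts.size = sizeof(opts);
  opts.accel_sequence = seq;

  /* The bdev layer either hands the sequence to the module together with
   * the write or finishes it itself before submission (see below). */
  rc = spdk_bdev_writev_blocks_ext(desc, ch, &dst, 1, offset_blocks,
                                   num_blocks, write_done, cb_arg, &opts);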

Such requests are passed directly to a bdev module (so that it can append
subsequent operations to the accel sequence), provided the bdev supports
accel sequences and the request doesn't need to be split.  If either of
these conditions isn't met, the bdev layer executes all of the accumulated
accel operations before passing the request to the bdev module.
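
Condensed, the new submission path boils down to the following (a rough
paraphrase, not the literal code; the names match the helpers added in
_bdev_io_submit_ext() and bdev_io_needs_sequence_exec() below):

  if (bdev_io->internal.accel_sequence != NULL &&
      (!desc->accel_sequence_supported[bdev_io->type] || bdev_io->internal.split)) {
          /* The module can't consume the sequence (or the IO will be split):
           * execute the accumulated operations first, then submit normally. */
          bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
  } else {
          /* Hand the request, sequence included, straight to the bdev module. */
          bdev_io_submit(bdev_io);
  }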

The reason for not submitting split IOs with an accel sequence is that the
sequence itself would need to be split too.  Currently, accel has no such
functionality, so this case is treated the same way as if the underlying
bdev module didn't support accel sequences: the sequence is executed before
the bdev_io is split.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I66c53b3a1a87a35ea2687292206c899f80aaed4a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16974
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Author: Konrad Sztyber <konrad.sztyber@intel.com>, 2023-02-14 15:08:09 +01:00
Committer: Ben Walker
Commit: 22c0e97884 (parent: 54a935a669)
6 changed files with 186 additions and 9 deletions

@@ -13,6 +13,7 @@
 #include "spdk/stdinc.h"
+#include "spdk/accel.h"
 #include "spdk/scsi_spec.h"
 #include "spdk/nvme_spec.h"
 #include "spdk/json.h"
@@ -221,8 +222,13 @@ struct spdk_bdev_ext_io_opts {
     void *memory_domain_ctx;
     /** Metadata buffer, optional */
     void *metadata;
+    /**
+     * Sequence of accel operations to be executed before/after (depending on the IO type) the
+     * request is submitted.
+     */
+    struct spdk_accel_sequence *accel_sequence;
 } __attribute__((packed));
-SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_ext_io_opts) == 32, "Incorrect size");
+SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_ext_io_opts) == 40, "Incorrect size");
 
 /**
  * Get the options for the bdev module.

@@ -800,6 +800,9 @@ struct spdk_bdev_io {
         struct spdk_memory_domain *memory_domain;
         void *memory_domain_ctx;
+
+        /* Sequence of accel operations */
+        struct spdk_accel_sequence *accel_sequence;
 
         /** stored user callback in case we split the I/O and use a temporary callback */
         spdk_bdev_io_completion_cb stored_user_cb;
@@ -988,6 +991,9 @@ struct spdk_bdev_io {
         struct spdk_memory_domain *memory_domain;
         void *memory_domain_ctx;
+
+        /* Sequence of accel operations passed by the user */
+        struct spdk_accel_sequence *accel_sequence;
 
         /** Data transfer completion callback */
         void (*data_transfer_cpl)(void *ctx, int rc);
     } internal;

@@ -384,6 +384,7 @@ static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_i
         struct iovec *iov, int iovcnt, void *md_buf,
         uint64_t offset_blocks, uint64_t num_blocks,
         struct spdk_memory_domain *domain, void *domain_ctx,
+        struct spdk_accel_sequence *seq,
         spdk_bdev_io_completion_cb cb, void *cb_arg);
 
 static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
@@ -901,6 +902,12 @@ bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
     return bdev_io->internal.memory_domain;
 }
 
+static inline bool
+bdev_io_use_accel_sequence(struct spdk_bdev_io *bdev_io)
+{
+    return bdev_io->internal.accel_sequence;
+}
+
 void
 spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
 {
@@ -957,6 +964,52 @@ _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
     return true;
 }
 
+static inline bool
+bdev_io_needs_sequence_exec(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
+{
+    if (!bdev_io_use_accel_sequence(bdev_io)) {
+        return false;
+    }
+
+    /* For now, we don't allow splitting IOs with an accel sequence and will treat them as if
+     * bdev module didn't support accel sequences */
+    return !desc->accel_sequence_supported[bdev_io->type] || bdev_io->internal.split;
+}
+
+static void
+bdev_io_submit_sequence_cb(void *ctx, int status)
+{
+    struct spdk_bdev_io *bdev_io = ctx;
+
+    bdev_io->u.bdev.accel_sequence = NULL;
+    bdev_io->internal.accel_sequence = NULL;
+
+    if (spdk_unlikely(status != 0)) {
+        SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
+        bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+        bdev_io_complete_unsubmitted(bdev_io);
+        return;
+    }
+
+    bdev_io_submit(bdev_io);
+}
+
+static void
+bdev_io_exec_sequence(struct spdk_bdev_io *bdev_io, spdk_accel_completion_cb cb_fn)
+{
+    int rc;
+
+    assert(bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
+    assert(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE);
+
+    rc = spdk_accel_sequence_finish(bdev_io->internal.accel_sequence, cb_fn, bdev_io);
+    if (spdk_unlikely(rc != 0)) {
+        SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", rc);
+        bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+        bdev_io_complete_unsubmitted(bdev_io);
+    }
+}
+
 static void
 bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, bool status)
 {
@@ -1031,6 +1084,8 @@ _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
     void *buf;
 
     if (spdk_bdev_is_md_separate(bdev)) {
+        assert(!bdev_io_use_accel_sequence(bdev_io));
+
         buf = (char *)bdev_io->u.bdev.iovs[0].iov_base + bdev_io->u.bdev.iovs[0].iov_len;
         md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
@@ -1066,6 +1121,7 @@ static void
 _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
             bdev_copy_bounce_buffer_cpl cpl_cb)
 {
+    struct spdk_bdev_channel *ch = bdev_io->internal.ch;
     int rc = 0;
 
     bdev_io->internal.data_transfer_cpl = cpl_cb;
@@ -1078,8 +1134,22 @@ _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t le
     /* set bounce buffer for this operation */
     bdev_io->u.bdev.iovs[0].iov_base = buf;
     bdev_io->u.bdev.iovs[0].iov_len = len;
-    /* if this is write path, copy data from original buffer to bounce buffer */
-    if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+
+    /* If we need to exec an accel sequence, append a copy operation making accel change the
+     * src/dst buffers of the previous operation */
+    if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
+        rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
+                    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, NULL, NULL,
+                    bdev_io->internal.orig_iovs,
+                    bdev_io->internal.orig_iovcnt,
+                    bdev_io->internal.memory_domain,
+                    bdev_io->internal.memory_domain_ctx, 0, NULL, NULL);
+        if (spdk_unlikely(rc != 0)) {
+            SPDK_ERRLOG("Failed to append copy to accel sequence: %p\n",
+                    bdev_io->internal.accel_sequence);
+        }
+    } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+        /* if this is write path, copy data from original buffer to bounce buffer */
         if (bdev_io_use_memory_domain(bdev_io)) {
             rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
                         bdev_io->internal.memory_domain_ctx,
@@ -1174,6 +1244,14 @@ static inline void
 bdev_submit_request(struct spdk_bdev *bdev, struct spdk_io_channel *ioch,
             struct spdk_bdev_io *bdev_io)
 {
+    /* After a request is submitted to a bdev module, the ownership of an accel sequence
+     * associated with that bdev_io is transferred to the bdev module. So, clear the internal
+     * sequence pointer to make sure we won't touch it anymore. */
+    if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev_io->u.bdev.accel_sequence != NULL) {
+        assert(!bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
+        bdev_io->internal.accel_sequence = NULL;
+    }
+
     bdev->fn_table->submit_request(ioch, bdev_io);
 }
@@ -1237,6 +1315,16 @@ _bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io)
          */
         shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
                     (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
+
+        /* If bdev module completed an I/O that has an accel sequence with NOMEM status, the
+         * ownership of that sequence is transferred back to the bdev layer, so we need to
+         * restore internal.accel_sequence to make sure that the sequence is handled
+         * correctly in case the I/O is later aborted. */
+        if ((bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
+             bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) && bdev_io->u.bdev.accel_sequence) {
+            assert(bdev_io->internal.accel_sequence == NULL);
+            bdev_io->internal.accel_sequence = bdev_io->u.bdev.accel_sequence;
+        }
         return true;
     }
@@ -1429,9 +1517,16 @@ _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *b
         SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
         bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
         bdev_io_complete_unsubmitted(bdev_io);
-    } else {
-        bdev_io_submit(bdev_io);
+        return;
     }
+
+    if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io) &&
+        bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+        bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
+        return;
+    }
+
+    bdev_io_submit(bdev_io);
 }
 
 static void
@@ -2567,11 +2662,12 @@ bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt
                     bdev_io_split_done, bdev_io);
         break;
     case SPDK_BDEV_IO_TYPE_WRITE:
+        assert(bdev_io->u.bdev.accel_sequence == NULL);
         rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
                     spdk_io_channel_from_ctx(bdev_io->internal.ch),
                     iov, iovcnt, md_buf, current_offset,
                     num_blocks, bdev_io->internal.memory_domain,
-                    bdev_io->internal.memory_domain_ctx,
+                    bdev_io->internal.memory_domain_ctx, NULL,
                     bdev_io_split_done, bdev_io);
         break;
     case SPDK_BDEV_IO_TYPE_UNMAP:
@@ -3120,11 +3216,23 @@ _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
 static inline void
 _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
 {
-    if (bdev_io->internal.memory_domain && !desc->memory_domains_supported) {
+    bool needs_exec = bdev_io_needs_sequence_exec(desc, bdev_io);
+
+    /* We need to allocate bounce buffer if bdev doesn't support memory domains, or if it does
+     * support them, but we need to execute an accel sequence and the data buffer is from accel
+     * memory domain (to avoid doing a push/pull from that domain).
+     */
+    if ((bdev_io->internal.memory_domain && !desc->memory_domains_supported) ||
+        (needs_exec && bdev_io->internal.memory_domain == spdk_accel_get_memory_domain())) {
         _bdev_io_ext_use_bounce_buffer(bdev_io);
         return;
     }
 
+    if (needs_exec && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
+        bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
+        return;
+    }
+
     bdev_io_submit(bdev_io);
 }
@@ -3165,6 +3273,7 @@ bdev_io_init(struct spdk_bdev_io *bdev_io,
     bdev_io->internal.memory_domain_ctx = NULL;
     bdev_io->internal.data_transfer_cpl = NULL;
     bdev_io->internal.split = bdev_io_should_split(bdev_io);
+    bdev_io->internal.accel_sequence = NULL;
 }
 
 static bool
@@ -4673,6 +4782,9 @@ bdev_seek(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io->internal.desc = desc;
     bdev_io->type = io_type;
     bdev_io->u.bdev.offset_blocks = offset_blocks;
+    bdev_io->u.bdev.memory_domain = NULL;
+    bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
     if (!spdk_bdev_io_type_supported(bdev, io_type)) {
@@ -4744,6 +4856,7 @@ bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch
     bdev_io->u.bdev.offset_blocks = offset_blocks;
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
     bdev_io_submit(bdev_io);
@@ -4842,6 +4955,7 @@ bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *c
     bdev_io->internal.memory_domain_ctx = domain_ctx;
     bdev_io->u.bdev.memory_domain = domain;
     bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     _bdev_io_submit_ext(desc, bdev_io);
@@ -4915,6 +5029,10 @@ spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *
         return -EINVAL;
     }
 
+    if (bdev_get_ext_io_opt(opts, accel_sequence, NULL) != NULL) {
+        return -ENOTSUP;
+    }
+
     return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
                 num_blocks,
                 bdev_get_ext_io_opt(opts, memory_domain, NULL),
@@ -4956,6 +5074,7 @@ bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *c
     bdev_io->u.bdev.offset_blocks = offset_blocks;
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
     bdev_io_submit(bdev_io);
@@ -5012,6 +5131,7 @@ bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *
         struct iovec *iov, int iovcnt, void *md_buf,
         uint64_t offset_blocks, uint64_t num_blocks,
         struct spdk_memory_domain *domain, void *domain_ctx,
+        struct spdk_accel_sequence *seq,
         spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
     struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
@@ -5042,8 +5162,10 @@ bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->internal.memory_domain = domain;
     bdev_io->internal.memory_domain_ctx = domain_ctx;
+    bdev_io->internal.accel_sequence = seq;
     bdev_io->u.bdev.memory_domain = domain;
     bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
+    bdev_io->u.bdev.accel_sequence = seq;
 
     _bdev_io_submit_ext(desc, bdev_io);
@@ -5073,7 +5195,7 @@ spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
         spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
     return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
-                num_blocks, NULL, NULL, cb, cb_arg);
+                num_blocks, NULL, NULL, NULL, cb, cb_arg);
 }
 
 int
@@ -5091,7 +5213,7 @@ spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_chan
     }
 
     return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
-                num_blocks, NULL, NULL, cb, cb_arg);
+                num_blocks, NULL, NULL, NULL, cb, cb_arg);
 }
 
 int
@@ -5121,6 +5243,7 @@ spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel
     return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, num_blocks,
                 bdev_get_ext_io_opt(opts, memory_domain, NULL),
                 bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL),
+                bdev_get_ext_io_opt(opts, accel_sequence, NULL),
                 cb, cb_arg);
 }
@@ -5215,6 +5338,7 @@ bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
         bdev_io_submit(bdev_io);
@@ -5285,6 +5409,7 @@ bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
         bdev_io_submit(bdev_io);
@@ -5472,6 +5597,7 @@ spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
         bdev_io_submit(bdev_io);
@@ -5524,6 +5650,7 @@ spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     bdev_io_submit(bdev_io);
@@ -5600,6 +5727,7 @@ spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channe
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
         bdev_io_submit(bdev_io);
@@ -5671,6 +5799,7 @@ spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
     bdev_io->u.bdev.memory_domain = NULL;
     bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
 
     bdev_io_submit(bdev_io);
     return 0;
@@ -5720,6 +5849,9 @@ spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io->u.bdev.iovcnt = 0;
     bdev_io->u.bdev.offset_blocks = offset_blocks;
     bdev_io->u.bdev.num_blocks = num_blocks;
+    bdev_io->u.bdev.memory_domain = NULL;
+    bdev_io->u.bdev.memory_domain_ctx = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
     bdev_io_submit(bdev_io);
@@ -6514,6 +6646,11 @@ _bdev_io_complete(void *ctx)
 {
     struct spdk_bdev_io *bdev_io = ctx;
 
+    if (spdk_unlikely(bdev_io->internal.accel_sequence != NULL)) {
+        assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
+        spdk_accel_sequence_abort(bdev_io->internal.accel_sequence);
+    }
+
     assert(bdev_io->internal.cb != NULL);
     assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
@@ -9250,6 +9387,7 @@ spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io->u.bdev.iovs = NULL;
     bdev_io->u.bdev.iovcnt = 0;
     bdev_io->u.bdev.md_buf = NULL;
+    bdev_io->u.bdev.accel_sequence = NULL;
     bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
     if (dst_offset_blocks == src_offset_blocks) {

@@ -21,6 +21,15 @@ DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_mem
         "test_domain");
 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
         (struct spdk_memory_domain *domain), 0);
+DEFINE_STUB(spdk_accel_sequence_finish, int,
+        (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
+DEFINE_STUB(spdk_accel_append_copy, int,
+        (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
+         uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+         struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
+         void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
 static bool g_memory_domain_pull_data_called;
 static bool g_memory_domain_push_data_called;

@@ -25,6 +25,15 @@ DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_mem
         "test_domain");
 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
         (struct spdk_memory_domain *domain), 0);
+DEFINE_STUB(spdk_accel_sequence_finish, int,
+        (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
+DEFINE_STUB(spdk_accel_append_copy, int,
+        (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
+         uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+         struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
+         void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
 int

@@ -34,6 +34,15 @@ DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_mem
         "test_domain");
 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
         (struct spdk_memory_domain *domain), 0);
+DEFINE_STUB(spdk_accel_sequence_finish, int,
+        (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
+DEFINE_STUB(spdk_accel_append_copy, int,
+        (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
+         uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+         struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
+         void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
 int