bdev: remove spdk_bdev_ext_io_opts from spdk_bdev_io

The spdk_bdev_ext_io_opts structure is used to pass extra options when
submitting a bdev IO request, without having to modify/add functions to
handle new options.  Additionally, the structure has a size field to
allow adding new fields without breaking the ABI (and thus having to
bump up the major version of a library).

It is also a part of spdk_bdev_io and there are several reasons for
removing it from that structure:

  1. The size field only makes sense in structures that are passed
     through pointers.  And spdk_bdev_ext_io_opts is indeed passed as a
     pointer to spdk_bdev_{readv,writev}_blocks_ext(), however it is
     also embedded in spdk_bdev_io (internal.ext_opts_copy), which is
     also part of the API.  It means that each time a new field is added
     to spdk_bdev_ext_io_opts, the size of spdk_bdev_io will also
     change, so we will need to bump the major version of libspdk_bdev
     anyway, thus making spdk_bdev_ext_io_opts.size useless.
  2. The size field also makes internal.ext_opts cumbersome to use, as
     each time one of its fields is accessed, we need to check the size.
     Currently the code doesn't do that, because all of the existing
     spdk_bdev_ext_io_opts fields were present when this structure was
     initially introduced, but we'd need to check the size before
     accessing any new fields.
  3. spdk_bdev_ext_io_opts has a metadata field, while spdk_bdev_io
     already has u.bdev.md_buf, which means that we store the same thing
     in several different places in spdk_bdev_io (u.bdev.md_buf,
     u.bdev.ext_opts->metadata, internal.ext_opts->metadata).

Therefore, this patch removes all references to spdk_bdev_ext_io_opts
from spdk_bdev_io and replaces them with fields (memory_domain,
memory_domain_ctx) that were missing in spdk_bdev_io.  Unfortunately,
this change breaks the API and requires changes in bdev modules that
supported spdk_bdev_io.u.bdev.ext_opts.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I49b7524eb84d1d4d7f12b7ab025fec36da1ee01f
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16773
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Konrad Sztyber 2023-02-10 16:22:51 +01:00 committed by Tomasz Zawadzki
parent c30dfbc2d7
commit 55f9479333
18 changed files with 303 additions and 421 deletions

View File

@ -208,7 +208,6 @@ SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
/** /**
* Structure with optional IO request parameters * Structure with optional IO request parameters
* The content of this structure must be valid until the IO request is completed
*/ */
struct spdk_bdev_ext_io_opts { struct spdk_bdev_ext_io_opts {
/** Size of this structure in bytes */ /** Size of this structure in bytes */

View File

@ -790,8 +790,9 @@ struct spdk_bdev_io {
/** Starting offset (in blocks) of the bdev for this I/O. */ /** Starting offset (in blocks) of the bdev for this I/O. */
uint64_t offset_blocks; uint64_t offset_blocks;
/** Pointer to user's ext opts to be used by bdev modules */ /** Memory domain and its context to be used by bdev modules */
struct spdk_bdev_ext_io_opts *ext_opts; struct spdk_memory_domain *memory_domain;
void *memory_domain_ctx;
/** stored user callback in case we split the I/O and use a temporary callback */ /** stored user callback in case we split the I/O and use a temporary callback */
spdk_bdev_io_completion_cb stored_user_cb; spdk_bdev_io_completion_cb stored_user_cb;
@ -974,11 +975,9 @@ struct spdk_bdev_io {
/** Enables queuing parent I/O when no bdev_ios available for split children. */ /** Enables queuing parent I/O when no bdev_ios available for split children. */
struct spdk_bdev_io_wait_entry waitq_entry; struct spdk_bdev_io_wait_entry waitq_entry;
/** Pointer to a structure passed by the user in ext API */ /** Memory domain and its context passed by the user in ext API */
struct spdk_bdev_ext_io_opts *ext_opts; struct spdk_memory_domain *memory_domain;
void *memory_domain_ctx;
/** Copy of user's opts, used when I/O is split */
struct spdk_bdev_ext_io_opts ext_opts_copy;
/** Data transfer completion callback */ /** Data transfer completion callback */
void (*data_transfer_cpl)(void *ctx, int rc); void (*data_transfer_cpl)(void *ctx, int rc);

View File

@ -370,13 +370,14 @@ static void bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status)
static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks, struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg, uint64_t num_blocks,
struct spdk_bdev_ext_io_opts *opts, bool copy_opts); struct spdk_memory_domain *domain, void *domain_ctx,
spdk_bdev_io_completion_cb cb, void *cb_arg);
static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, void *md_buf, struct iovec *iov, int iovcnt, void *md_buf,
uint64_t offset_blocks, uint64_t num_blocks, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_memory_domain *domain, void *domain_ctx,
struct spdk_bdev_ext_io_opts *opts, bool copy_opts); spdk_bdev_io_completion_cb cb, void *cb_arg);
static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch, static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
uint64_t offset, uint64_t length, uint64_t offset, uint64_t length,
@ -395,6 +396,10 @@ static bool claim_type_is_v2(enum spdk_bdev_claim_type type);
static void bdev_desc_release_claims(struct spdk_bdev_desc *desc); static void bdev_desc_release_claims(struct spdk_bdev_desc *desc);
static void claim_reset(struct spdk_bdev *bdev); static void claim_reset(struct spdk_bdev *bdev);
#define bdev_get_ext_io_opt(opts, field, defval) \
(((opts) != NULL && offsetof(struct spdk_bdev_ext_io_opts, field) + \
sizeof((opts)->field) <= sizeof(*(opts))) ? (opts)->field : (defval))
void void
spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size) spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
{ {
@ -888,7 +893,7 @@ spdk_bdev_next_leaf(struct spdk_bdev *prev)
static inline bool static inline bool
bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io) bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
{ {
return bdev_io->internal.ext_opts && bdev_io->internal.ext_opts->memory_domain; return bdev_io->internal.memory_domain;
} }
void void
@ -992,8 +997,8 @@ _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t l
if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
if (bdev_io_use_memory_domain(bdev_io)) { if (bdev_io_use_memory_domain(bdev_io)) {
rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain, rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
bdev_io->internal.ext_opts->memory_domain_ctx, bdev_io->internal.memory_domain_ctx,
&bdev_io->internal.orig_md_iov, 1, &bdev_io->internal.orig_md_iov, 1,
&bdev_io->internal.bounce_md_iov, 1, &bdev_io->internal.bounce_md_iov, 1,
bdev_io->internal.data_transfer_cpl, bdev_io->internal.data_transfer_cpl,
@ -1003,7 +1008,7 @@ _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t l
return; return;
} }
SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n", SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain), rc); spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain), rc);
} else { } else {
memcpy(md_buf, bdev_io->internal.orig_md_iov.iov_base, bdev_io->internal.orig_md_iov.iov_len); memcpy(md_buf, bdev_io->internal.orig_md_iov.iov_base, bdev_io->internal.orig_md_iov.iov_len);
} }
@ -1071,8 +1076,8 @@ _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t le
/* if this is write path, copy data from original buffer to bounce buffer */ /* if this is write path, copy data from original buffer to bounce buffer */
if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
if (bdev_io_use_memory_domain(bdev_io)) { if (bdev_io_use_memory_domain(bdev_io)) {
rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain, rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
bdev_io->internal.ext_opts->memory_domain_ctx, bdev_io->internal.memory_domain_ctx,
bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovs,
(uint32_t) bdev_io->internal.orig_iovcnt, (uint32_t) bdev_io->internal.orig_iovcnt,
bdev_io->u.bdev.iovs, 1, bdev_io->u.bdev.iovs, 1,
@ -1083,7 +1088,7 @@ _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t le
return; return;
} }
SPDK_ERRLOG("Failed to pull data from memory domain %s\n", SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain)); spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain));
} else { } else {
spdk_copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt); spdk_copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt);
} }
@ -1267,8 +1272,8 @@ _bdev_io_push_bounce_md_buffer(struct spdk_bdev_io *bdev_io)
bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) { bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
if (bdev_io_use_memory_domain(bdev_io)) { if (bdev_io_use_memory_domain(bdev_io)) {
/* If memory domain is used then we need to call async push function */ /* If memory domain is used then we need to call async push function */
rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain, rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
bdev_io->internal.ext_opts->memory_domain_ctx, bdev_io->internal.memory_domain_ctx,
&bdev_io->internal.orig_md_iov, &bdev_io->internal.orig_md_iov,
(uint32_t)bdev_io->internal.orig_iovcnt, (uint32_t)bdev_io->internal.orig_iovcnt,
&bdev_io->internal.bounce_md_iov, 1, &bdev_io->internal.bounce_md_iov, 1,
@ -1279,7 +1284,7 @@ _bdev_io_push_bounce_md_buffer(struct spdk_bdev_io *bdev_io)
return; return;
} }
SPDK_ERRLOG("Failed to push md to memory domain %s\n", SPDK_ERRLOG("Failed to push md to memory domain %s\n",
spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain)); spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain));
} else { } else {
memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf, memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
bdev_io->internal.orig_md_iov.iov_len); bdev_io->internal.orig_md_iov.iov_len);
@ -1325,8 +1330,8 @@ _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_
bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) { bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
if (bdev_io_use_memory_domain(bdev_io)) { if (bdev_io_use_memory_domain(bdev_io)) {
/* If memory domain is used then we need to call async push function */ /* If memory domain is used then we need to call async push function */
rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain, rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
bdev_io->internal.ext_opts->memory_domain_ctx, bdev_io->internal.memory_domain_ctx,
bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovs,
(uint32_t)bdev_io->internal.orig_iovcnt, (uint32_t)bdev_io->internal.orig_iovcnt,
&bdev_io->internal.bounce_iov, 1, &bdev_io->internal.bounce_iov, 1,
@ -1337,7 +1342,7 @@ _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_
return; return;
} }
SPDK_ERRLOG("Failed to push data to memory domain %s\n", SPDK_ERRLOG("Failed to push data to memory domain %s\n",
spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain)); spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain));
} else { } else {
spdk_copy_buf_to_iovs(bdev_io->internal.orig_iovs, spdk_copy_buf_to_iovs(bdev_io->internal.orig_iovs,
bdev_io->internal.orig_iovcnt, bdev_io->internal.orig_iovcnt,
@ -2544,17 +2549,17 @@ bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt
rc = bdev_readv_blocks_with_md(bdev_io->internal.desc, rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch), spdk_io_channel_from_ctx(bdev_io->internal.ch),
iov, iovcnt, md_buf, current_offset, iov, iovcnt, md_buf, current_offset,
num_blocks, num_blocks, bdev_io->internal.memory_domain,
bdev_io_split_done, bdev_io, bdev_io->internal.memory_domain_ctx,
bdev_io->internal.ext_opts, true); bdev_io_split_done, bdev_io);
break; break;
case SPDK_BDEV_IO_TYPE_WRITE: case SPDK_BDEV_IO_TYPE_WRITE:
rc = bdev_writev_blocks_with_md(bdev_io->internal.desc, rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch), spdk_io_channel_from_ctx(bdev_io->internal.ch),
iov, iovcnt, md_buf, current_offset, iov, iovcnt, md_buf, current_offset,
num_blocks, num_blocks, bdev_io->internal.memory_domain,
bdev_io_split_done, bdev_io, bdev_io->internal.memory_domain_ctx,
bdev_io->internal.ext_opts, true); bdev_io_split_done, bdev_io);
break; break;
case SPDK_BDEV_IO_TYPE_UNMAP: case SPDK_BDEV_IO_TYPE_UNMAP:
io_wait_fn = _bdev_unmap_split; io_wait_fn = _bdev_unmap_split;
@ -3082,20 +3087,6 @@ bdev_io_submit(struct spdk_bdev_io *bdev_io)
} }
} }
static inline void
_bdev_io_copy_ext_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
struct spdk_bdev_ext_io_opts *opts_copy = &bdev_io->internal.ext_opts_copy;
/* Zero part we don't copy */
memset(((char *)opts_copy) + opts->size, 0, sizeof(*opts) - opts->size);
memcpy(opts_copy, opts, opts->size);
opts_copy->size = sizeof(*opts_copy);
opts_copy->metadata = bdev_io->u.bdev.md_buf;
/* Save pointer to the copied ext_opts which will be used by bdev modules */
bdev_io->u.bdev.ext_opts = opts_copy;
}
static inline void static inline void
_bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io) _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
{ {
@ -3104,33 +3095,21 @@ _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
* For write operation we need to pull buffers from memory domain before submitting IO. * For write operation we need to pull buffers from memory domain before submitting IO.
* Once read operation completes, we need to use memory_domain push functionality to * Once read operation completes, we need to use memory_domain push functionality to
* update data in original memory domain IO buffer * update data in original memory domain IO buffer
* This IO request will go through a regular IO flow, so clear memory domains pointers in * This IO request will go through a regular IO flow, so clear memory domains pointers */
* the copied ext_opts */ bdev_io->u.bdev.memory_domain = NULL;
bdev_io->internal.ext_opts_copy.memory_domain = NULL; bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io->internal.ext_opts_copy.memory_domain_ctx = NULL;
_bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb, _bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
} }
static inline void static inline void
_bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io, _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
struct spdk_bdev_ext_io_opts *opts, bool copy_opts)
{ {
if (opts) { if (bdev_io->internal.memory_domain && !desc->memory_domains_supported) {
bool use_pull_push = opts->memory_domain && !desc->memory_domains_supported; _bdev_io_ext_use_bounce_buffer(bdev_io);
assert(opts->size <= sizeof(*opts)); return;
/*
* copy if size is smaller than opts struct to avoid having to check size
* on every access to bdev_io->u.bdev.ext_opts
*/
if (copy_opts || use_pull_push || opts->size < sizeof(*opts)) {
_bdev_io_copy_ext_opts(bdev_io, opts);
if (use_pull_push) {
_bdev_io_ext_use_bounce_buffer(bdev_io);
return;
}
}
} }
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
} }
@ -3167,7 +3146,8 @@ bdev_io_init(struct spdk_bdev_io *bdev_io,
bdev_io->num_retries = 0; bdev_io->num_retries = 0;
bdev_io->internal.get_buf_cb = NULL; bdev_io->internal.get_buf_cb = NULL;
bdev_io->internal.get_aux_buf_cb = NULL; bdev_io->internal.get_aux_buf_cb = NULL;
bdev_io->internal.ext_opts = NULL; bdev_io->internal.memory_domain = NULL;
bdev_io->internal.memory_domain_ctx = NULL;
bdev_io->internal.data_transfer_cpl = NULL; bdev_io->internal.data_transfer_cpl = NULL;
} }
@ -4722,7 +4702,8 @@ bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch
bdev_io->u.bdev.md_buf = md_buf; bdev_io->u.bdev.md_buf = md_buf;
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -4792,8 +4773,8 @@ spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
static int static int
bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks, struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg, uint64_t num_blocks, struct spdk_memory_domain *domain, void *domain_ctx,
struct spdk_bdev_ext_io_opts *opts, bool copy_opts) spdk_bdev_io_completion_cb cb, void *cb_arg)
{ {
struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc); struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
struct spdk_bdev_io *bdev_io; struct spdk_bdev_io *bdev_io;
@ -4817,10 +4798,12 @@ bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *c
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->internal.ext_opts = opts; bdev_io->internal.memory_domain = domain;
bdev_io->u.bdev.ext_opts = opts; bdev_io->internal.memory_domain_ctx = domain_ctx;
bdev_io->u.bdev.memory_domain = domain;
bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
_bdev_io_submit_ext(desc, bdev_io, opts, copy_opts); _bdev_io_submit_ext(desc, bdev_io);
return 0; return 0;
} }
@ -4832,7 +4815,7 @@ spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
spdk_bdev_io_completion_cb cb, void *cb_arg) spdk_bdev_io_completion_cb cb, void *cb_arg)
{ {
return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
num_blocks, cb, cb_arg, NULL, false); num_blocks, NULL, NULL, cb, cb_arg);
} }
int int
@ -4850,7 +4833,7 @@ spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_chann
} }
return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks, return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
num_blocks, cb, cb_arg, NULL, false); num_blocks, NULL, NULL, cb, cb_arg);
} }
static inline bool static inline bool
@ -4893,7 +4876,10 @@ spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *
} }
return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
num_blocks, cb, cb_arg, opts, false); num_blocks,
bdev_get_ext_io_opt(opts, memory_domain, NULL),
bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL),
cb, cb_arg);
} }
static int static int
@ -4928,7 +4914,8 @@ bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *c
bdev_io->u.bdev.md_buf = md_buf; bdev_io->u.bdev.md_buf = md_buf;
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -4984,8 +4971,8 @@ static int
bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, void *md_buf, struct iovec *iov, int iovcnt, void *md_buf,
uint64_t offset_blocks, uint64_t num_blocks, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_memory_domain *domain, void *domain_ctx,
struct spdk_bdev_ext_io_opts *opts, bool copy_opts) spdk_bdev_io_completion_cb cb, void *cb_arg)
{ {
struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc); struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
struct spdk_bdev_io *bdev_io; struct spdk_bdev_io *bdev_io;
@ -5013,10 +5000,12 @@ bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->internal.ext_opts = opts; bdev_io->internal.memory_domain = domain;
bdev_io->u.bdev.ext_opts = opts; bdev_io->internal.memory_domain_ctx = domain_ctx;
bdev_io->u.bdev.memory_domain = domain;
bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
_bdev_io_submit_ext(desc, bdev_io, opts, copy_opts); _bdev_io_submit_ext(desc, bdev_io);
return 0; return 0;
} }
@ -5044,7 +5033,7 @@ spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
spdk_bdev_io_completion_cb cb, void *cb_arg) spdk_bdev_io_completion_cb cb, void *cb_arg)
{ {
return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
num_blocks, cb, cb_arg, NULL, false); num_blocks, NULL, NULL, cb, cb_arg);
} }
int int
@ -5062,7 +5051,7 @@ spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_chan
} }
return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks, return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
num_blocks, cb, cb_arg, NULL, false); num_blocks, NULL, NULL, cb, cb_arg);
} }
int int
@ -5089,8 +5078,10 @@ spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel
return -EINVAL; return -EINVAL;
} }
return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, num_blocks,
num_blocks, cb, cb_arg, opts, false); bdev_get_ext_io_opt(opts, memory_domain, NULL),
bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL),
cb, cb_arg);
} }
static void static void
@ -5182,7 +5173,8 @@ bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) { if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -5251,7 +5243,8 @@ bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) { if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -5437,7 +5430,8 @@ spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) { if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -5488,7 +5482,8 @@ spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
bdev_io->u.bdev.zcopy.commit = 0; bdev_io->u.bdev.zcopy.commit = 0;
bdev_io->u.bdev.zcopy.start = 1; bdev_io->u.bdev.zcopy.start = 1;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -5563,7 +5558,8 @@ spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channe
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) { if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
@ -5633,7 +5629,8 @@ spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
bdev_io->u.bdev.offset_blocks = offset_blocks; bdev_io->u.bdev.offset_blocks = offset_blocks;
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);
return 0; return 0;
@ -9077,7 +9074,8 @@ spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
bdev_io->u.bdev.offset_blocks = dst_offset_blocks; bdev_io->u.bdev.offset_blocks = dst_offset_blocks;
bdev_io->u.bdev.copy.src_offset_blocks = src_offset_blocks; bdev_io->u.bdev.copy.src_offset_blocks = src_offset_blocks;
bdev_io->u.bdev.num_blocks = num_blocks; bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.ext_opts = NULL; bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io_init(bdev_io, bdev, cb_arg, cb); bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io_submit(bdev_io); bdev_io_submit(bdev_io);

View File

@ -275,6 +275,16 @@ bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
spdk_bdev_free_io(bdev_io); spdk_bdev_free_io(bdev_io);
} }
static inline void
bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
memset(opts, 0, sizeof(*opts));
opts->size = sizeof(*opts);
opts->memory_domain = bdev_io->u.bdev.memory_domain;
opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
opts->metadata = bdev_io->u.bdev.md_buf;
}
int int
spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io, spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
spdk_bdev_io_completion_cb cb) spdk_bdev_io_completion_cb cb)
@ -282,6 +292,7 @@ spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk
struct spdk_bdev_part *part = ch->part; struct spdk_bdev_part *part = ch->part;
struct spdk_io_channel *base_ch = ch->base_ch; struct spdk_io_channel *base_ch = ch->base_ch;
struct spdk_bdev_desc *base_desc = part->internal.base->desc; struct spdk_bdev_desc *base_desc = part->internal.base->desc;
struct spdk_bdev_ext_io_opts io_opts;
uint64_t offset, remapped_offset, remapped_src_offset; uint64_t offset, remapped_offset, remapped_src_offset;
int rc = 0; int rc = 0;
@ -293,41 +304,22 @@ spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk
/* Modify the I/O to adjust for the offset within the base bdev. */ /* Modify the I/O to adjust for the offset within the base bdev. */
switch (bdev_io->type) { switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ: case SPDK_BDEV_IO_TYPE_READ:
if (bdev_io->u.bdev.ext_opts) { bdev_part_init_ext_io_opts(bdev_io, &io_opts);
rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs, rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, remapped_offset, bdev_io->u.bdev.iovcnt, remapped_offset,
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.num_blocks,
bdev_part_complete_io, bdev_io, bdev_part_complete_io, bdev_io, &io_opts);
bdev_io->u.bdev.ext_opts);
} else {
rc = spdk_bdev_readv_blocks_with_md(base_desc, base_ch,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf, remapped_offset,
bdev_io->u.bdev.num_blocks,
bdev_part_complete_io, bdev_io);
}
break; break;
case SPDK_BDEV_IO_TYPE_WRITE: case SPDK_BDEV_IO_TYPE_WRITE:
rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset); rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
if (rc != 0) { if (rc != 0) {
return SPDK_BDEV_IO_STATUS_FAILED; return SPDK_BDEV_IO_STATUS_FAILED;
} }
bdev_part_init_ext_io_opts(bdev_io, &io_opts);
if (bdev_io->u.bdev.ext_opts) { rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, remapped_offset,
bdev_io->u.bdev.iovcnt, remapped_offset, bdev_io->u.bdev.num_blocks,
bdev_io->u.bdev.num_blocks, bdev_part_complete_io, bdev_io, &io_opts);
bdev_part_complete_io, bdev_io,
bdev_io->u.bdev.ext_opts);
} else {
rc = spdk_bdev_writev_blocks_with_md(base_desc, base_ch,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf, remapped_offset,
bdev_io->u.bdev.num_blocks,
bdev_part_complete_io, bdev_io);
}
break; break;
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset, rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,

View File

@ -253,12 +253,23 @@ vbdev_delay_queue_io(struct spdk_bdev_io *bdev_io)
} }
} }
static void
delay_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
memset(opts, 0, sizeof(*opts));
opts->size = sizeof(*opts);
opts->memory_domain = bdev_io->u.bdev.memory_domain;
opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
opts->metadata = bdev_io->u.bdev.md_buf;
}
static void static void
delay_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success) delay_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{ {
struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay, struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay,
delay_bdev); delay_bdev);
struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch); struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch);
struct spdk_bdev_ext_io_opts io_opts;
int rc; int rc;
if (!success) { if (!success) {
@ -266,10 +277,11 @@ delay_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
return; return;
} }
delay_init_ext_io_opts(bdev_io, &io_opts);
rc = spdk_bdev_readv_blocks_ext(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs, rc = spdk_bdev_readv_blocks_ext(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, _delay_complete_io, bdev_io->u.bdev.num_blocks, _delay_complete_io,
bdev_io, bdev_io->u.bdev.ext_opts); bdev_io, &io_opts);
if (rc == -ENOMEM) { if (rc == -ENOMEM) {
SPDK_ERRLOG("No memory, start to queue io for delay.\n"); SPDK_ERRLOG("No memory, start to queue io for delay.\n");
@ -381,6 +393,7 @@ vbdev_delay_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay, delay_bdev); struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay, delay_bdev);
struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch); struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch);
struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx; struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx;
struct spdk_bdev_ext_io_opts io_opts;
int rc = 0; int rc = 0;
bool is_p99; bool is_p99;
@ -400,10 +413,11 @@ vbdev_delay_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
break; break;
case SPDK_BDEV_IO_TYPE_WRITE: case SPDK_BDEV_IO_TYPE_WRITE:
io_ctx->type = is_p99 ? DELAY_P99_WRITE : DELAY_AVG_WRITE; io_ctx->type = is_p99 ? DELAY_P99_WRITE : DELAY_AVG_WRITE;
delay_init_ext_io_opts(bdev_io, &io_opts);
rc = spdk_bdev_writev_blocks_ext(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs, rc = spdk_bdev_writev_blocks_ext(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, _delay_complete_io, bdev_io->u.bdev.num_blocks, _delay_complete_io,
bdev_io, bdev_io->u.bdev.ext_opts); bdev_io, &io_opts);
break; break;
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
rc = spdk_bdev_write_zeroes_blocks(delay_node->base_desc, delay_ch->base_ch, rc = spdk_bdev_write_zeroes_blocks(delay_node->base_desc, delay_ch->base_ch,

View File

@ -839,27 +839,17 @@ lvol_read(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
uint64_t start_page, num_pages; uint64_t start_page, num_pages;
struct spdk_lvol *lvol = bdev_io->bdev->ctxt; struct spdk_lvol *lvol = bdev_io->bdev->ctxt;
struct spdk_blob *blob = lvol->blob; struct spdk_blob *blob = lvol->blob;
struct vbdev_lvol_io *lvol_io = (struct vbdev_lvol_io *)bdev_io->driver_ctx;
start_page = bdev_io->u.bdev.offset_blocks; start_page = bdev_io->u.bdev.offset_blocks;
num_pages = bdev_io->u.bdev.num_blocks; num_pages = bdev_io->u.bdev.num_blocks;
if (bdev_io->u.bdev.ext_opts) { lvol_io->ext_io_opts.size = sizeof(lvol_io->ext_io_opts);
struct vbdev_lvol_io *lvol_io = (struct vbdev_lvol_io *)bdev_io->driver_ctx; lvol_io->ext_io_opts.memory_domain = bdev_io->u.bdev.memory_domain;
lvol_io->ext_io_opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
lvol_io->ext_io_opts.size = sizeof(lvol_io->ext_io_opts); spdk_blob_io_readv_ext(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
lvol_io->ext_io_opts.memory_domain = bdev_io->u.bdev.ext_opts->memory_domain; num_pages, lvol_op_comp, bdev_io, &lvol_io->ext_io_opts);
lvol_io->ext_io_opts.memory_domain_ctx = bdev_io->u.bdev.ext_opts->memory_domain_ctx;
/* Save a pointer to ext_opts passed by the user, it will be used in bs_dev readv/writev_ext functions
* to restore ext_opts structure. That is done since bdev and blob extended functions use different
* extended opts structures */
lvol_io->ext_io_opts.user_ctx = bdev_io->u.bdev.ext_opts;
spdk_blob_io_readv_ext(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
num_pages, lvol_op_comp, bdev_io, &lvol_io->ext_io_opts);
} else {
spdk_blob_io_readv(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
num_pages, lvol_op_comp, bdev_io);
}
} }
static void static void
@ -867,27 +857,17 @@ lvol_write(struct spdk_lvol *lvol, struct spdk_io_channel *ch, struct spdk_bdev_
{ {
uint64_t start_page, num_pages; uint64_t start_page, num_pages;
struct spdk_blob *blob = lvol->blob; struct spdk_blob *blob = lvol->blob;
struct vbdev_lvol_io *lvol_io = (struct vbdev_lvol_io *)bdev_io->driver_ctx;
start_page = bdev_io->u.bdev.offset_blocks; start_page = bdev_io->u.bdev.offset_blocks;
num_pages = bdev_io->u.bdev.num_blocks; num_pages = bdev_io->u.bdev.num_blocks;
if (bdev_io->u.bdev.ext_opts) { lvol_io->ext_io_opts.size = sizeof(lvol_io->ext_io_opts);
struct vbdev_lvol_io *lvol_io = (struct vbdev_lvol_io *)bdev_io->driver_ctx; lvol_io->ext_io_opts.memory_domain = bdev_io->u.bdev.memory_domain;
lvol_io->ext_io_opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
lvol_io->ext_io_opts.size = sizeof(lvol_io->ext_io_opts); spdk_blob_io_writev_ext(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
lvol_io->ext_io_opts.memory_domain = bdev_io->u.bdev.ext_opts->memory_domain; num_pages, lvol_op_comp, bdev_io, &lvol_io->ext_io_opts);
lvol_io->ext_io_opts.memory_domain_ctx = bdev_io->u.bdev.ext_opts->memory_domain_ctx;
/* Save a pointer to ext_opts passed by the user, it will be used in bs_dev readv/writev_ext functions
* to restore ext_opts structure. That is done since bdev and blob extended functions use different
* extended opts structures */
lvol_io->ext_io_opts.user_ctx = bdev_io->u.bdev.ext_opts;
spdk_blob_io_writev_ext(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
num_pages, lvol_op_comp, bdev_io, &lvol_io->ext_io_opts);
} else {
spdk_blob_io_writev(blob, ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, start_page,
num_pages, lvol_op_comp, bdev_io);
}
} }
static int static int

View File

@ -155,12 +155,12 @@ static void bdev_nvme_submit_request(struct spdk_io_channel *ch,
struct spdk_bdev_io *bdev_io); struct spdk_bdev_io *bdev_io);
static int bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, static int bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, uint64_t lba, void *md, uint64_t lba_count, uint64_t lba,
uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts); uint32_t flags, struct spdk_memory_domain *domain, void *domain_ctx);
static int bdev_nvme_no_pi_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, static int bdev_nvme_no_pi_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, uint64_t lba); void *md, uint64_t lba_count, uint64_t lba);
static int bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, static int bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, uint64_t lba, void *md, uint64_t lba_count, uint64_t lba,
uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts); uint32_t flags, struct spdk_memory_domain *domain, void *domain_ctx);
static int bdev_nvme_zone_appendv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, static int bdev_nvme_zone_appendv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, void *md, uint64_t lba_count,
uint64_t zslba, uint32_t flags); uint64_t zslba, uint32_t flags);
@ -2298,7 +2298,8 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.num_blocks,
bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.offset_blocks,
bdev->dif_check_flags, bdev->dif_check_flags,
bdev_io->u.bdev.ext_opts); bdev_io->u.bdev.memory_domain,
bdev_io->u.bdev.memory_domain_ctx);
exit: exit:
if (spdk_unlikely(ret != 0)) { if (spdk_unlikely(ret != 0)) {
@ -2324,7 +2325,8 @@ _bdev_nvme_submit_request(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_i
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.num_blocks,
bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.offset_blocks,
bdev->dif_check_flags, bdev->dif_check_flags,
bdev_io->u.bdev.ext_opts); bdev_io->u.bdev.memory_domain,
bdev_io->u.bdev.memory_domain_ctx);
} else { } else {
spdk_bdev_io_get_buf(bdev_io, bdev_nvme_get_buf_cb, spdk_bdev_io_get_buf(bdev_io, bdev_nvme_get_buf_cb,
bdev_io->u.bdev.num_blocks * bdev->blocklen); bdev_io->u.bdev.num_blocks * bdev->blocklen);
@ -2339,7 +2341,8 @@ _bdev_nvme_submit_request(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_i
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.num_blocks,
bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.offset_blocks,
bdev->dif_check_flags, bdev->dif_check_flags,
bdev_io->u.bdev.ext_opts); bdev_io->u.bdev.memory_domain,
bdev_io->u.bdev.memory_domain_ctx);
break; break;
case SPDK_BDEV_IO_TYPE_COMPARE: case SPDK_BDEV_IO_TYPE_COMPARE:
rc = bdev_nvme_comparev(nbdev_io, rc = bdev_nvme_comparev(nbdev_io,
@ -6571,7 +6574,7 @@ bdev_nvme_no_pi_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
static int static int
bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, uint64_t lba, uint32_t flags, void *md, uint64_t lba_count, uint64_t lba, uint32_t flags,
struct spdk_bdev_ext_io_opts *ext_opts) struct spdk_memory_domain *domain, void *domain_ctx)
{ {
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns; struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair; struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
@ -6585,30 +6588,16 @@ bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
bio->iovpos = 0; bio->iovpos = 0;
bio->iov_offset = 0; bio->iov_offset = 0;
if (ext_opts) { bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts);
bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts); bio->ext_opts.memory_domain = domain;
bio->ext_opts.memory_domain = ext_opts->memory_domain; bio->ext_opts.memory_domain_ctx = domain_ctx;
bio->ext_opts.memory_domain_ctx = ext_opts->memory_domain_ctx; bio->ext_opts.io_flags = flags;
bio->ext_opts.io_flags = flags; bio->ext_opts.metadata = md;
bio->ext_opts.metadata = md;
rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count,
bdev_nvme_readv_done, bio,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
&bio->ext_opts);
} else if (iovcnt == 1) {
rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, iov[0].iov_base, md, lba,
lba_count,
bdev_nvme_readv_done, bio,
flags,
0, 0);
} else {
rc = spdk_nvme_ns_cmd_readv_with_md(ns, qpair, lba, lba_count,
bdev_nvme_readv_done, bio, flags,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
md, 0, 0);
}
rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count,
bdev_nvme_readv_done, bio,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
&bio->ext_opts);
if (rc != 0 && rc != -ENOMEM) { if (rc != 0 && rc != -ENOMEM) {
SPDK_ERRLOG("readv failed: rc = %d\n", rc); SPDK_ERRLOG("readv failed: rc = %d\n", rc);
} }
@ -6617,8 +6606,8 @@ bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
static int static int
bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt, bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
void *md, uint64_t lba_count, uint64_t lba, void *md, uint64_t lba_count, uint64_t lba, uint32_t flags,
uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts) struct spdk_memory_domain *domain, void *domain_ctx)
{ {
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns; struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair; struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
@ -6632,30 +6621,16 @@ bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
bio->iovpos = 0; bio->iovpos = 0;
bio->iov_offset = 0; bio->iov_offset = 0;
if (ext_opts) { bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts);
bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts); bio->ext_opts.memory_domain = domain;
bio->ext_opts.memory_domain = ext_opts->memory_domain; bio->ext_opts.memory_domain_ctx = domain_ctx;
bio->ext_opts.memory_domain_ctx = ext_opts->memory_domain_ctx; bio->ext_opts.io_flags = flags;
bio->ext_opts.io_flags = flags; bio->ext_opts.metadata = md;
bio->ext_opts.metadata = md;
rc = spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count,
bdev_nvme_writev_done, bio,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
&bio->ext_opts);
} else if (iovcnt == 1) {
rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, iov[0].iov_base, md, lba,
lba_count,
bdev_nvme_writev_done, bio,
flags,
0, 0);
} else {
rc = spdk_nvme_ns_cmd_writev_with_md(ns, qpair, lba, lba_count,
bdev_nvme_writev_done, bio, flags,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
md, 0, 0);
}
rc = spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count,
bdev_nvme_writev_done, bio,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
&bio->ext_opts);
if (rc != 0 && rc != -ENOMEM) { if (rc != 0 && rc != -ENOMEM) {
SPDK_ERRLOG("writev failed: rc = %d\n", rc); SPDK_ERRLOG("writev failed: rc = %d\n", rc);
} }

View File

@ -213,6 +213,16 @@ vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
} }
} }
static void
pt_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
memset(opts, 0, sizeof(*opts));
opts->size = sizeof(*opts);
opts->memory_domain = bdev_io->u.bdev.memory_domain;
opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
opts->metadata = bdev_io->u.bdev.md_buf;
}
/* Callback for getting a buf from the bdev pool in the event that the caller passed /* Callback for getting a buf from the bdev pool in the event that the caller passed
* in NULL, we need to own the buffer so it doesn't get freed by another vbdev module * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module
* beneath us before we're done with it. That won't happen in this example but it could * beneath us before we're done with it. That won't happen in this example but it could
@ -225,6 +235,7 @@ pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, boo
pt_bdev); pt_bdev);
struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch); struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx; struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
struct spdk_bdev_ext_io_opts io_opts;
int rc; int rc;
if (!success) { if (!success) {
@ -232,20 +243,11 @@ pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, boo
return; return;
} }
if (bdev_io->u.bdev.ext_opts) { pt_init_ext_io_opts(bdev_io, &io_opts);
rc = spdk_bdev_readv_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs, rc = spdk_bdev_readv_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, _pt_complete_io, bdev_io->u.bdev.num_blocks, _pt_complete_io,
bdev_io, bdev_io->u.bdev.ext_opts); bdev_io, &io_opts);
} else {
rc = spdk_bdev_readv_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks,
_pt_complete_io, bdev_io);
}
if (rc != 0) { if (rc != 0) {
if (rc == -ENOMEM) { if (rc == -ENOMEM) {
SPDK_ERRLOG("No memory, start to queue io for passthru.\n"); SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
@ -268,6 +270,7 @@ vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *b
struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev); struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch); struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx; struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
struct spdk_bdev_ext_io_opts io_opts;
int rc = 0; int rc = 0;
/* Setup a per IO context value; we don't do anything with it in the vbdev other /* Setup a per IO context value; we don't do anything with it in the vbdev other
@ -282,19 +285,11 @@ vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *b
bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
break; break;
case SPDK_BDEV_IO_TYPE_WRITE: case SPDK_BDEV_IO_TYPE_WRITE:
if (bdev_io->u.bdev.ext_opts) { pt_init_ext_io_opts(bdev_io, &io_opts);
rc = spdk_bdev_writev_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs, rc = spdk_bdev_writev_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, _pt_complete_io, bdev_io->u.bdev.num_blocks, _pt_complete_io,
bdev_io, bdev_io->u.bdev.ext_opts); bdev_io, &io_opts);
} else {
rc = spdk_bdev_writev_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks,
_pt_complete_io, bdev_io);
}
break; break;
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES: case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch, rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,

View File

@ -75,6 +75,7 @@ concat_submit_rw_request(struct raid_bdev_io *raid_io)
int ret = 0; int ret = 0;
struct raid_base_bdev_info *base_info; struct raid_base_bdev_info *base_info;
struct spdk_io_channel *base_ch; struct spdk_io_channel *base_ch;
struct spdk_bdev_ext_io_opts io_opts = {};
int i; int i;
pd_idx = -1; pd_idx = -1;
@ -102,32 +103,22 @@ concat_submit_rw_request(struct raid_bdev_io *raid_io)
assert(raid_ch != NULL); assert(raid_ch != NULL);
assert(raid_ch->base_channel); assert(raid_ch->base_channel);
base_ch = raid_ch->base_channel[pd_idx]; base_ch = raid_ch->base_channel[pd_idx];
io_opts.size = sizeof(io_opts);
io_opts.memory_domain = bdev_io->u.bdev.memory_domain;
io_opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
io_opts.metadata = bdev_io->u.bdev.md_buf;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
if (bdev_io->u.bdev.ext_opts != NULL) { ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, pd_lba, pd_blocks, concat_bdev_io_completion,
pd_lba, pd_blocks, concat_bdev_io_completion, raid_io, &io_opts);
raid_io, bdev_io->u.bdev.ext_opts);
} else {
ret = spdk_bdev_readv_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
concat_bdev_io_completion, raid_io);
}
} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
if (bdev_io->u.bdev.ext_opts != NULL) { ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, pd_lba, pd_blocks, concat_bdev_io_completion,
pd_lba, pd_blocks, concat_bdev_io_completion, raid_io, &io_opts);
raid_io, bdev_io->u.bdev.ext_opts);
} else {
ret = spdk_bdev_writev_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
concat_bdev_io_completion, raid_io);
}
} else { } else {
SPDK_ERRLOG("Recvd not supported io type %u\n", bdev_io->type); SPDK_ERRLOG("Recvd not supported io type %u\n", bdev_io->type);
assert(0); assert(0);

View File

@ -61,6 +61,7 @@ static void
raid0_submit_rw_request(struct raid_bdev_io *raid_io) raid0_submit_rw_request(struct raid_bdev_io *raid_io)
{ {
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io); struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
struct spdk_bdev_ext_io_opts io_opts = {};
struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch; struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
struct raid_bdev *raid_bdev = raid_io->raid_bdev; struct raid_bdev *raid_bdev = raid_io->raid_bdev;
uint64_t pd_strip; uint64_t pd_strip;
@ -103,32 +104,22 @@ raid0_submit_rw_request(struct raid_bdev_io *raid_io)
assert(raid_ch != NULL); assert(raid_ch != NULL);
assert(raid_ch->base_channel); assert(raid_ch->base_channel);
base_ch = raid_ch->base_channel[pd_idx]; base_ch = raid_ch->base_channel[pd_idx];
io_opts.size = sizeof(io_opts);
io_opts.memory_domain = bdev_io->u.bdev.memory_domain;
io_opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
io_opts.metadata = bdev_io->u.bdev.md_buf;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
if (bdev_io->u.bdev.ext_opts != NULL) { ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, pd_lba, pd_blocks, raid0_bdev_io_completion,
pd_lba, pd_blocks, raid0_bdev_io_completion, raid_io, &io_opts);
raid_io, bdev_io->u.bdev.ext_opts);
} else {
ret = spdk_bdev_readv_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
raid0_bdev_io_completion, raid_io);
}
} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
if (bdev_io->u.bdev.ext_opts != NULL) { ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, pd_lba, pd_blocks, raid0_bdev_io_completion,
pd_lba, pd_blocks, raid0_bdev_io_completion, raid_io, &io_opts);
raid_io, bdev_io->u.bdev.ext_opts);
} else {
ret = spdk_bdev_writev_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
raid0_bdev_io_completion, raid_io);
}
} else { } else {
SPDK_ERRLOG("Recvd not supported io type %u\n", bdev_io->type); SPDK_ERRLOG("Recvd not supported io type %u\n", bdev_io->type);
assert(0); assert(0);

View File

@ -35,11 +35,22 @@ _raid1_submit_rw_request(void *_raid_io)
raid1_submit_rw_request(raid_io); raid1_submit_rw_request(raid_io);
} }
static void
raid1_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
memset(opts, 0, sizeof(*opts));
opts->size = sizeof(*opts);
opts->memory_domain = bdev_io->u.bdev.memory_domain;
opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
opts->metadata = bdev_io->u.bdev.md_buf;
}
static int static int
raid1_submit_read_request(struct raid_bdev_io *raid_io) raid1_submit_read_request(struct raid_bdev_io *raid_io)
{ {
struct raid_bdev *raid_bdev = raid_io->raid_bdev; struct raid_bdev *raid_bdev = raid_io->raid_bdev;
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io); struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
struct spdk_bdev_ext_io_opts io_opts;
uint8_t ch_idx = 0; uint8_t ch_idx = 0;
struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[ch_idx]; struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[ch_idx];
struct spdk_io_channel *base_ch = raid_io->raid_ch->base_channel[ch_idx]; struct spdk_io_channel *base_ch = raid_io->raid_ch->base_channel[ch_idx];
@ -51,18 +62,11 @@ raid1_submit_read_request(struct raid_bdev_io *raid_io)
raid_io->base_bdev_io_remaining = 1; raid_io->base_bdev_io_remaining = 1;
if (bdev_io->u.bdev.ext_opts != NULL) { raid1_init_ext_io_opts(bdev_io, &io_opts);
ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch, ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
pd_lba, pd_blocks, raid1_bdev_io_completion, pd_lba, pd_blocks, raid1_bdev_io_completion,
raid_io, bdev_io->u.bdev.ext_opts); raid_io, &io_opts);
} else {
ret = spdk_bdev_readv_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
raid1_bdev_io_completion, raid_io);
}
if (spdk_likely(ret == 0)) { if (spdk_likely(ret == 0)) {
raid_io->base_bdev_io_submitted++; raid_io->base_bdev_io_submitted++;
@ -80,6 +84,7 @@ raid1_submit_write_request(struct raid_bdev_io *raid_io)
{ {
struct raid_bdev *raid_bdev = raid_io->raid_bdev; struct raid_bdev *raid_bdev = raid_io->raid_bdev;
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io); struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
struct spdk_bdev_ext_io_opts io_opts;
struct raid_base_bdev_info *base_info; struct raid_base_bdev_info *base_info;
struct spdk_io_channel *base_ch; struct spdk_io_channel *base_ch;
uint64_t pd_lba, pd_blocks; uint64_t pd_lba, pd_blocks;
@ -94,23 +99,15 @@ raid1_submit_write_request(struct raid_bdev_io *raid_io)
raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs; raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
} }
raid1_init_ext_io_opts(bdev_io, &io_opts);
for (; idx < raid_bdev->num_base_bdevs; idx++) { for (; idx < raid_bdev->num_base_bdevs; idx++) {
base_info = &raid_bdev->base_bdev_info[idx]; base_info = &raid_bdev->base_bdev_info[idx];
base_ch = raid_io->raid_ch->base_channel[idx]; base_ch = raid_io->raid_ch->base_channel[idx];
if (bdev_io->u.bdev.ext_opts != NULL) { ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, pd_lba, pd_blocks, raid1_bdev_io_completion,
pd_lba, pd_blocks, raid1_bdev_io_completion, raid_io, &io_opts);
raid_io, bdev_io->u.bdev.ext_opts);
} else {
ret = spdk_bdev_writev_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
pd_lba, pd_blocks,
raid1_bdev_io_completion, raid_io);
}
if (spdk_unlikely(ret != 0)) { if (spdk_unlikely(ret != 0)) {
if (spdk_unlikely(ret == -ENOMEM)) { if (spdk_unlikely(ret == -ENOMEM)) {
raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch, raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,

View File

@ -273,11 +273,13 @@ raid5f_chunk_write_retry(void *_raid_io)
} }
static inline void static inline void
copy_ext_io_opts(struct spdk_bdev_ext_io_opts *dst, struct spdk_bdev_ext_io_opts *src) raid5f_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{ {
memset(dst, 0, sizeof(*dst)); memset(opts, 0, sizeof(*opts));
memcpy(dst, src, src->size); opts->size = sizeof(*opts);
dst->size = sizeof(*dst); opts->memory_domain = bdev_io->u.bdev.memory_domain;
opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
opts->metadata = bdev_io->u.bdev.md_buf;
} }
static int static int
@ -292,18 +294,12 @@ raid5f_chunk_write(struct chunk *chunk)
uint64_t base_offset_blocks = (stripe_req->stripe_index << raid_bdev->strip_size_shift); uint64_t base_offset_blocks = (stripe_req->stripe_index << raid_bdev->strip_size_shift);
int ret; int ret;
if (bdev_io->u.bdev.ext_opts != NULL) { raid5f_init_ext_io_opts(bdev_io, &chunk->ext_opts);
copy_ext_io_opts(&chunk->ext_opts, bdev_io->u.bdev.ext_opts); chunk->ext_opts.metadata = chunk->md_buf;
chunk->ext_opts.metadata = chunk->md_buf;
ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch, chunk->iovs, chunk->iovcnt, ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch, chunk->iovs, chunk->iovcnt,
base_offset_blocks, raid_bdev->strip_size, raid5f_chunk_write_complete_bdev_io, base_offset_blocks, raid_bdev->strip_size, raid5f_chunk_write_complete_bdev_io,
chunk, &chunk->ext_opts); chunk, &chunk->ext_opts);
} else {
ret = spdk_bdev_writev_blocks_with_md(base_info->desc, base_ch, chunk->iovs, chunk->iovcnt,
chunk->md_buf, base_offset_blocks, raid_bdev->strip_size,
raid5f_chunk_write_complete_bdev_io, chunk);
}
if (spdk_unlikely(ret)) { if (spdk_unlikely(ret)) {
if (ret == -ENOMEM) { if (ret == -ENOMEM) {
@ -498,20 +494,14 @@ raid5f_submit_read_request(struct raid_bdev_io *raid_io, uint64_t stripe_index,
uint64_t chunk_offset = stripe_offset - (chunk_data_idx << raid_bdev->strip_size_shift); uint64_t chunk_offset = stripe_offset - (chunk_data_idx << raid_bdev->strip_size_shift);
uint64_t base_offset_blocks = (stripe_index << raid_bdev->strip_size_shift) + chunk_offset; uint64_t base_offset_blocks = (stripe_index << raid_bdev->strip_size_shift) + chunk_offset;
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io); struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
struct spdk_bdev_ext_io_opts io_opts;
int ret; int ret;
if (bdev_io->u.bdev.ext_opts != NULL) { raid5f_init_ext_io_opts(bdev_io, &io_opts);
ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs, ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.iovcnt,
base_offset_blocks, bdev_io->u.bdev.num_blocks, raid5f_chunk_read_complete, raid_io, base_offset_blocks, bdev_io->u.bdev.num_blocks, raid5f_chunk_read_complete, raid_io,
bdev_io->u.bdev.ext_opts); &io_opts);
} else {
ret = spdk_bdev_readv_blocks_with_md(base_info->desc, base_ch,
bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf,
base_offset_blocks, bdev_io->u.bdev.num_blocks,
raid5f_chunk_read_complete, raid_io);
}
if (spdk_unlikely(ret == -ENOMEM)) { if (spdk_unlikely(ret == -ENOMEM)) {
raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch, raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,

View File

@ -169,25 +169,27 @@ bdev_blob_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
} }
} }
static inline void
blob_ext_io_opts_to_bdev_opts(struct spdk_bdev_ext_io_opts *dst, struct spdk_blob_ext_io_opts *src)
{
memset(dst, 0, sizeof(*dst));
dst->size = sizeof(*dst);
dst->memory_domain = src->memory_domain;
dst->memory_domain_ctx = src->memory_domain_ctx;
}
static void static void
bdev_blob_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, bdev_blob_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args, uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *io_opts) struct spdk_blob_ext_io_opts *io_opts)
{ {
struct spdk_bdev_ext_io_opts *bdev_io_opts = NULL; struct spdk_bdev_ext_io_opts bdev_io_opts;
int rc; int rc;
if (io_opts) { blob_ext_io_opts_to_bdev_opts(&bdev_io_opts, io_opts);
/* bdev ext API requires ext_io_opts to be allocated by the user, we don't have enough context to allocate
* bdev ext_opts structure here. Also blob and bdev ext_opts are not API/ABI compatible, so we can't use the given
* io_opts. Restore ext_opts passed by the user of this bs_dev */
bdev_io_opts = io_opts->user_ctx;
assert(bdev_io_opts);
}
rc = spdk_bdev_readv_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count, rc = spdk_bdev_readv_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count,
bdev_blob_io_complete, cb_args, bdev_io_opts); bdev_blob_io_complete, cb_args, &bdev_io_opts);
if (rc == -ENOMEM) { if (rc == -ENOMEM) {
bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_READ, cb_args, bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_READ, cb_args,
io_opts); io_opts);
@ -202,19 +204,12 @@ bdev_blob_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args, uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *io_opts) struct spdk_blob_ext_io_opts *io_opts)
{ {
struct spdk_bdev_ext_io_opts *bdev_io_opts = NULL; struct spdk_bdev_ext_io_opts bdev_io_opts;
int rc; int rc;
if (io_opts) { blob_ext_io_opts_to_bdev_opts(&bdev_io_opts, io_opts);
/* bdev ext API requires ext_io_opts to be allocated by the user, we don't have enough context to allocate
* bdev ext_opts structure here. Also blob and bdev ext_opts are not API/ABI compatible, so we can't use the given
* io_opts. Restore ext_opts passed by the user of this bs_dev */
bdev_io_opts = io_opts->user_ctx;
assert(bdev_io_opts);
}
rc = spdk_bdev_writev_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count, rc = spdk_bdev_writev_blocks_ext(__get_desc(dev), channel, iov, iovcnt, lba, lba_count,
bdev_blob_io_complete, cb_args, bdev_io_opts); bdev_blob_io_complete, cb_args, &bdev_io_opts);
if (rc == -ENOMEM) { if (rc == -ENOMEM) {
bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_WRITE, cb_args, bdev_blob_queue_io(dev, channel, iov, iovcnt, lba, 0, lba_count, SPDK_BDEV_IO_TYPE_WRITE, cb_args,
io_opts); io_opts);

View File

@ -91,8 +91,6 @@ struct ut_expected_io {
int iovcnt; int iovcnt;
struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV]; struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
void *md_buf; void *md_buf;
struct spdk_bdev_ext_io_opts *ext_io_opts;
bool copy_opts;
TAILQ_ENTRY(ut_expected_io) link; TAILQ_ENTRY(ut_expected_io) link;
}; };
@ -282,25 +280,6 @@ stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
if (expected_io->md_buf != NULL) { if (expected_io->md_buf != NULL) {
CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
if (bdev_io->u.bdev.ext_opts) {
CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
}
}
if (expected_io->copy_opts) {
if (expected_io->ext_io_opts) {
/* opts are not NULL so it should have been copied */
CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts);
CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy);
/* internal opts always points to opts passed */
CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
} else {
/* passed opts was NULL so we expect bdev_io opts to be NULL */
CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL);
}
} else {
/* opts were not copied so they should be equal */
CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts);
} }
if (expected_io->length == 0) { if (expected_io->length == 0) {
@ -5555,7 +5534,6 @@ _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
if (ext_io_opts) { if (ext_io_opts) {
expected_io->md_buf = ext_io_opts->metadata; expected_io->md_buf = ext_io_opts->metadata;
expected_io->ext_io_opts = ext_io_opts;
} }
ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@ -5573,7 +5551,6 @@ _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
if (ext_io_opts) { if (ext_io_opts) {
expected_io->md_buf = ext_io_opts->metadata; expected_io->md_buf = ext_io_opts->metadata;
expected_io->ext_io_opts = ext_io_opts;
} }
ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@ -5711,15 +5688,11 @@ bdev_io_ext_split(void)
/* read */ /* read */
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
expected_io->md_buf = ext_io_opts.metadata; expected_io->md_buf = ext_io_opts.metadata;
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
expected_io->md_buf = ext_io_opts.metadata + 2 * 8; expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@ -5736,15 +5709,11 @@ bdev_io_ext_split(void)
g_io_done = false; g_io_done = false;
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
expected_io->md_buf = ext_io_opts.metadata; expected_io->md_buf = ext_io_opts.metadata;
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
expected_io->md_buf = ext_io_opts.metadata + 2 * 8; expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@ -5799,8 +5768,6 @@ bdev_io_ext_bounce_buffer(void)
g_io_done = false; g_io_done = false;
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
@ -5816,8 +5783,6 @@ bdev_io_ext_bounce_buffer(void)
g_io_done = false; g_io_done = false;
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
expected_io->ext_io_opts = &ext_io_opts;
expected_io->copy_opts = true;
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

View File

@ -2315,7 +2315,6 @@ test_submit_nvme_cmd(void)
struct nvme_bdev *bdev; struct nvme_bdev *bdev;
struct spdk_bdev_io *bdev_io; struct spdk_bdev_io *bdev_io;
struct spdk_io_channel *ch; struct spdk_io_channel *ch;
struct spdk_bdev_ext_io_opts ext_io_opts = {};
int rc; int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE); memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
@ -2364,19 +2363,12 @@ test_submit_nvme_cmd(void)
ut_test_submit_fused_nvme_cmd(ch, bdev_io); ut_test_submit_fused_nvme_cmd(ch, bdev_io);
/* Verify that ext NVME API is called if bdev_io ext_opts is set */ /* Verify that ext NVME API is called */
bdev_io->u.bdev.ext_opts = &ext_io_opts;
g_ut_readv_ext_called = false; g_ut_readv_ext_called = false;
ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
CU_ASSERT(g_ut_readv_ext_called == true); CU_ASSERT(g_ut_readv_ext_called == true);
g_ut_readv_ext_called = false; g_ut_readv_ext_called = false;
g_ut_writev_ext_called = false;
ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
CU_ASSERT(g_ut_writev_ext_called == true);
g_ut_writev_ext_called = false;
bdev_io->u.bdev.ext_opts = NULL;
ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);
free(bdev_io); free(bdev_io);

View File

@ -270,12 +270,6 @@ bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
free(bdev_io->u.bdev.iovs); free(bdev_io->u.bdev.iovs);
} }
if (bdev_io->u.bdev.ext_opts) {
if (bdev_io->u.bdev.ext_opts->metadata) {
bdev_io->u.bdev.ext_opts->metadata = NULL;
}
free(bdev_io->u.bdev.ext_opts);
}
free(bdev_io); free(bdev_io);
} }
@ -301,9 +295,7 @@ bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, str
SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL); SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN; bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
bdev_io->internal.ch = channel; bdev_io->internal.ch = channel;
bdev_io->u.bdev.ext_opts = calloc(1, sizeof(struct spdk_bdev_ext_io_opts)); bdev_io->u.bdev.md_buf = (void *)0xAEDFEBAC;
SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.ext_opts != NULL);
bdev_io->u.bdev.ext_opts->metadata = (void *)0xAEDFEBAC;
} }
static void static void

View File

@ -15,14 +15,6 @@
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module)); DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0); DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev)); DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);
void * void *
spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io) spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
@ -446,6 +438,19 @@ spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
cb_arg); cb_arg);
} }
int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t offset_blocks,
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
struct spdk_bdev_ext_io_opts *opts)
{
CU_ASSERT_PTR_NULL(opts->memory_domain);
CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);
return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
num_blocks, cb, cb_arg);
}
int int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, void *md_buf, struct iovec *iov, int iovcnt, void *md_buf,
@ -479,6 +484,19 @@ spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
cb_arg); cb_arg);
} }
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t offset_blocks,
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
struct spdk_bdev_ext_io_opts *opts)
{
CU_ASSERT_PTR_NULL(opts->memory_domain);
CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);
return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
num_blocks, cb, cb_arg);
}
static void static void
xor_block(uint8_t *a, uint8_t *b, size_t size) xor_block(uint8_t *a, uint8_t *b, size_t size)
{ {
@ -681,6 +699,9 @@ test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_
CU_FAIL_FATAL("unsupported io_type"); CU_FAIL_FATAL("unsupported io_type");
} }
assert(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
assert(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS); CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0); CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);

View File

@ -1419,8 +1419,6 @@ ut_vbdev_lvol_io_type_supported(void)
static void static void
ut_lvol_read_write(void) ut_lvol_read_write(void)
{ {
struct spdk_bdev_ext_io_opts bdev_ext_opts = {};
g_io = calloc(1, sizeof(struct spdk_bdev_io) + vbdev_lvs_get_ctx_size()); g_io = calloc(1, sizeof(struct spdk_bdev_io) + vbdev_lvs_get_ctx_size());
SPDK_CU_ASSERT_FATAL(g_io != NULL); SPDK_CU_ASSERT_FATAL(g_io != NULL);
g_base_bdev = calloc(1, sizeof(struct spdk_bdev)); g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
@ -1440,8 +1438,6 @@ ut_lvol_read_write(void)
CU_ASSERT(g_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); CU_ASSERT(g_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
g_ext_api_called = false; g_ext_api_called = false;
g_io->u.bdev.ext_opts = &bdev_ext_opts;
lvol_read(g_ch, g_io); lvol_read(g_ch, g_io);
CU_ASSERT(g_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); CU_ASSERT(g_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
CU_ASSERT(g_ext_api_called == true); CU_ASSERT(g_ext_api_called == true);