bdev: keep IOs on the io_memory_domain queue during pull/push
The IOs are now kept on the io_memory_domain queue only while they have an outstanding pull/push operation. This will make it easier to support retrying pull/push in case of ENOMEM.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: If5a54fac532206ee8472bacf364a5ef6cde8edea
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17677
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 72345f3a69
commit 417d3f6738
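To illustrate the idea in the commit message, below is a minimal sketch of the queueing pattern the diff applies: the IO is linked onto the channel's io_memory_domain queue only while a pull is in flight, and unlinked either in the pull completion callback or immediately if the pull could not be started. The example_* function names are made up for illustration; the fields and calls mirror the bdev-internal code in the diff, and error handling is abbreviated.

/* Sketch only: assumes the bdev-internal definitions of struct spdk_bdev_io
 * and struct spdk_bdev_channel from lib/bdev; not the full bdev.c logic. */
static void
example_pull_md_done(void *ctx, int status)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct spdk_bdev_channel *ch = bdev_io->internal.ch;

	/* The pull has finished, so the IO leaves the queue before its
	 * data-transfer completion callback runs. */
	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
	bdev_io->internal.data_transfer_cpl(bdev_io, status);
}

static void
example_pull_md(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
	int rc;

	/* Queue the IO only for the duration of the pull. */
	TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
	rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
					  bdev_io->internal.memory_domain_ctx,
					  &bdev_io->internal.orig_md_iov, 1,
					  &bdev_io->internal.bounce_md_iov, 1,
					  example_pull_md_done, bdev_io);
	if (rc != 0) {
		/* The pull never started; take the IO back off the queue.
		 * Because the IO is queued only around the pull, an ENOMEM
		 * here could later be handled by re-queueing and retrying. */
		TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
	}
}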
@@ -1084,23 +1084,36 @@ _bdev_io_pull_buffer_cpl(void *ctx, int rc)
 	bdev_io_get_buf_complete(bdev_io, !rc);
 }
 
+static void
+bdev_io_pull_md_buf_done(void *ctx, int status)
+{
+	struct spdk_bdev_io *bdev_io = ctx;
+	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
+
+	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
+	assert(bdev_io->internal.data_transfer_cpl);
+	bdev_io->internal.data_transfer_cpl(bdev_io, status);
+}
+
 static void
 bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
 {
+	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
 	int rc = 0;
 
 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
 		if (bdev_io_use_memory_domain(bdev_io)) {
+			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
 			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
 							  bdev_io->internal.memory_domain_ctx,
 							  &bdev_io->internal.orig_md_iov, 1,
 							  &bdev_io->internal.bounce_md_iov, 1,
-							  bdev_io->internal.data_transfer_cpl,
-							  bdev_io);
+							  bdev_io_pull_md_buf_done, bdev_io);
 			if (rc == 0) {
 				/* Continue to submit IO in completion callback */
 				return;
 			}
+			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
 			SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
 				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain), rc);
 		} else {
@@ -1154,8 +1167,8 @@ _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
 	bdev_io_get_buf_complete(bdev_io, true);
 }
 
-static void
-_bdev_io_pull_bounce_data_buf_done(void *ctx, int rc)
+static inline void
+bdev_io_pull_bounce_data_buf_done(void *ctx, int rc)
 {
 	struct spdk_bdev_io *bdev_io = ctx;
 
@@ -1169,6 +1182,16 @@ _bdev_io_pull_bounce_data_buf_done(void *ctx, int rc)
 	_bdev_io_set_md_buf(bdev_io);
 }
 
+static void
+_bdev_io_pull_bounce_data_buf_done(void *ctx, int status)
+{
+	struct spdk_bdev_io *bdev_io = ctx;
+	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
+
+	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
+	bdev_io_pull_bounce_data_buf_done(ctx, status);
+}
+
 static void
 bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
 {
@@ -1206,6 +1229,7 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
 		/* if this is write path, copy data from original buffer to bounce buffer */
 		if (bdev_io_use_memory_domain(bdev_io)) {
+			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
 			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
 							  bdev_io->internal.memory_domain_ctx,
 							  bdev_io->internal.orig_iovs,
@@ -1217,6 +1241,7 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
 				/* Continue to submit IO in completion callback */
 				return;
 			}
+			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
 			SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
 				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain));
 		} else {
@@ -1228,7 +1253,7 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
 		}
 	}
 
-	_bdev_io_pull_bounce_data_buf_done(bdev_io, rc);
+	bdev_io_pull_bounce_data_buf_done(bdev_io, rc);
 }
 
 static void
@@ -1625,10 +1650,6 @@ static void
 _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
 			      bool success)
 {
-	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
-
-	TAILQ_REMOVE(&bdev_ch->io_memory_domain, bdev_io, internal.link);
-
 	if (!success) {
 		SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
@@ -3348,8 +3369,6 @@ bdev_io_submit(struct spdk_bdev_io *bdev_io)
 static inline void
 _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
 {
-	struct spdk_bdev_channel *ch = bdev_io->internal.ch;
-
 	/* bdev doesn't support memory domains, thereby buffers in this IO request can't
 	 * be accessed directly. It is needed to allocate buffers before issuing IO operation.
 	 * For write operation we need to pull buffers from memory domain before submitting IO.
@@ -3358,7 +3377,6 @@ _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
 	 * This IO request will go through a regular IO flow, so clear memory domains pointers */
 	bdev_io->u.bdev.memory_domain = NULL;
 	bdev_io->u.bdev.memory_domain_ctx = NULL;
-	TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
 	_bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
 				       bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 }