bdev/nvme: Factor out request submit functions into a helper function
The following patches will change I/O retry to use the same io_path if it is still available. However, bdev_nvme_submit_request() always calls bdev_nvme_find_io_path() first. For I/O retry we want to skip bdev_nvme_find_io_path() when possible and use nbdev_io->io_path instead.

To reuse as much code as possible and leave the fast path untouched, factor the request submission out of bdev_nvme_submit_request() into _bdev_nvme_submit_request().

While developing this patch, a bug was found: bdev_io->internal.ch differed from the ch passed to bdev_nvme_submit_request(). Fix it in this patch as well.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Id003e033ecde218d1902bca5706c772edef5d5e5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16013
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 2796687d54
commit 21160add26
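As a rough illustration of the intent behind this split (the actual retry changes land in the follow-up patches, not here), here is a minimal sketch, assuming the surrounding definitions in module/bdev/nvme/bdev_nvme.c; the helper name example_retry_io is hypothetical:

/* Hypothetical sketch, not code from this commit or the follow-up patches.
 * bdev_nvme_submit_request() resolves an io_path with
 * bdev_nvme_find_io_path() and then calls _bdev_nvme_submit_request();
 * a retry path could keep the io_path recorded in nbdev_io and call the
 * inner helper directly, skipping the lookup without touching the fast path.
 */
static void
example_retry_io(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	if (nbdev_io->io_path != NULL) {
		/* Reuse the io_path chosen at the original submission. */
		_bdev_nvme_submit_request(nbdev_ch, bdev_io);
	} else {
		/* No cached path; go through the normal path selection. */
		bdev_nvme_submit_request(spdk_io_channel_from_ctx(nbdev_ch), bdev_io);
	}
}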
@@ -2111,28 +2111,14 @@ exit:
 	}
 }
 
-static void
-bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static inline void
+_bdev_nvme_submit_request(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_io *bdev_io)
 {
-	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
-	struct spdk_bdev *bdev = bdev_io->bdev;
 	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
+	struct spdk_bdev *bdev = bdev_io->bdev;
 	struct nvme_bdev_io *nbdev_io_to_abort;
 	int rc = 0;
 
-	spdk_trace_record(TRACE_BDEV_NVME_IO_START, 0, 0, (uintptr_t)nbdev_io, (uintptr_t)bdev_io);
-	nbdev_io->io_path = bdev_nvme_find_io_path(nbdev_ch);
-	if (spdk_unlikely(!nbdev_io->io_path)) {
-		if (!bdev_nvme_io_type_is_admin(bdev_io->type)) {
-			rc = -ENXIO;
-			goto exit;
-		}
-
-		/* Admin commands do not use the optimal I/O path.
-		 * Simply fall through even if it is not found.
-		 */
-	}
-
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
 		if (bdev_io->u.bdev.iovs && bdev_io->u.bdev.iovs[0].iov_base) {
@@ -2257,12 +2243,33 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 		break;
 	}
 
-exit:
 	if (spdk_unlikely(rc != 0)) {
 		bdev_nvme_io_complete(nbdev_io, rc);
 	}
 }
 
+static void
+bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+{
+	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
+	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
+
+	spdk_trace_record(TRACE_BDEV_NVME_IO_START, 0, 0, (uintptr_t)nbdev_io, (uintptr_t)bdev_io);
+	nbdev_io->io_path = bdev_nvme_find_io_path(nbdev_ch);
+	if (spdk_unlikely(!nbdev_io->io_path)) {
+		if (!bdev_nvme_io_type_is_admin(bdev_io->type)) {
+			bdev_nvme_io_complete(nbdev_io, -ENXIO);
+			return;
+		}
+
+		/* Admin commands do not use the optimal I/O path.
+		 * Simply fall through even if it is not found.
+		 */
+	}
+
+	_bdev_nvme_submit_request(nbdev_ch, bdev_io);
+}
+
 static bool
 bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
 {
@@ -1858,9 +1858,6 @@ test_pending_reset(void)
 	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
 	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
 
-	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
-	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
-
 	set_thread(1);
 
 	ch2 = spdk_get_io_channel(bdev);
@@ -1872,7 +1869,10 @@ test_pending_reset(void)
 	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
 	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
 
-	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
+	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
+	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+
+	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
 	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
 
 	/* The first reset request is submitted on thread 1, and the second reset request
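The two test hunks above are the internal.ch fix mentioned in the commit message: each queued reset bdev_io is now allocated against the channel of the thread that submits it. A minimal sanity check of that pairing, assuming the test's existing variables and that ut_alloc_bdev_io() records the passed channel in bdev_io->internal.ch (an assumption, not shown in this diff):

/* Assumed invariant (not shown in this diff): ut_alloc_bdev_io() stores the
 * passed spdk_io_channel in bdev_io->internal.ch. With the reordered setup,
 * the first reset is paired with ch2 (thread 1) and the second with ch1
 * (thread 0), matching the threads test_pending_reset() submits them on.
 */
CU_ASSERT(first_bdev_io->internal.ch == (struct spdk_bdev_channel *)ch2);
CU_ASSERT(second_bdev_io->internal.ch == (struct spdk_bdev_channel *)ch1);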