From 7b8e7212a6b9541dda27a83042cc40a76c29a58c Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Fri, 22 Oct 2021 05:15:09 +0900
Subject: [PATCH] bdev/nvme: Abort the queued I/O for retry

The NVMe bdev module queues retried I/Os itself now. bdev_nvme_abort()
needs to check and abort the target I/O if it is queued for retry.

This change will cover admin passthrough requests too because they will
be queued on the same thread as their callers and the public API
spdk_bdev_abort() requires to be submitted on the same thread as the
target I/O or admin passthrough requests.

Signed-off-by: Shuhei Matsumoto
Change-Id: If37e8188bd3875805cef436437439220698124b9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9913
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Aleksey Marchuk
---
 module/bdev/nvme/bdev_nvme.c                  | 17 ++++++++--
 .../lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c  | 31 +++++++++++++++++++
 2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/module/bdev/nvme/bdev_nvme.c b/module/bdev/nvme/bdev_nvme.c
index 19779a59f..09af86898 100644
--- a/module/bdev/nvme/bdev_nvme.c
+++ b/module/bdev/nvme/bdev_nvme.c
@@ -4687,14 +4687,25 @@ static void
 bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
		struct nvme_bdev_io *bio_to_abort)
 {
+	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
+	struct spdk_bdev_io *bdev_io_to_abort;
 	struct nvme_io_path *io_path;
 	struct nvme_ctrlr *nvme_ctrlr;
 	int rc = 0;
 
-	bio->io_path = NULL;
-	bio->orig_thread = spdk_get_thread();
+	/* Traverse the retry_io_list first. */
+	TAILQ_FOREACH(bdev_io_to_abort, &nbdev_ch->retry_io_list, module_link) {
+		if ((struct nvme_bdev_io *)bdev_io_to_abort->driver_ctx == bio_to_abort) {
+			TAILQ_REMOVE(&nbdev_ch->retry_io_list, bdev_io_to_abort, module_link);
+			spdk_bdev_io_complete(bdev_io_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
+
+			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+			return;
+		}
+	}
+
 	/* Even admin commands, they were submitted to only nvme_ctrlrs which were
 	 * on any io_path. So traverse the io_path list for not only I/O commands
 	 * but also admin commands.
@@ -4725,7 +4736,7 @@ bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
 		/* If no command was found or there was any error, complete the abort
 		 * request with failure.
 		 */
-		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), SPDK_BDEV_IO_STATUS_FAILED);
+		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 	}
 }
 
diff --git a/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c b/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c
index de01de17a..027d13775 100644
--- a/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c
+++ b/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c
@@ -2445,6 +2445,37 @@ test_abort(void)
 
 	set_thread(0);
 
+	/* If qpair is disconnected, it is freed and then reconnected via resetting
+	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
+	 * while resetting the nvme_ctrlr.
+	 */
+	ctrlr_ch1->qpair->is_connected = false;
+
+	poll_thread_times(0, 3);
+
+	CU_ASSERT(ctrlr_ch1->qpair == NULL);
+	CU_ASSERT(nvme_ctrlr->resetting == true);
+
+	write_io->internal.in_submit_request = true;
+
+	bdev_nvme_submit_request(ch1, write_io);
+
+	CU_ASSERT(write_io->internal.in_submit_request == true);
+	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));
+
+	/* Aborting the queued write request should succeed immediately. */
+	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
+	abort_io->u.abort.bio_to_abort = write_io;
+	abort_io->internal.in_submit_request = true;
+
+	bdev_nvme_submit_request(ch1, abort_io);
+
+	CU_ASSERT(abort_io->internal.in_submit_request == false);
+	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
+	CU_ASSERT(write_io->internal.in_submit_request == false);
+	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
+
 	spdk_put_io_channel(ch1);
 
 	set_thread(1);