bdev/nvme: Further clean-up of bdev_nvme_reset()

nvme_bdev_ctrlr->reset_bio is accessed only by the thread which called
spdk_for_each_channel(), and the callback to spdk_for_each_channel()
is called after unwinding the stack via a message.

Hence bdev_nvme_reset() can call _bdev_nvme_reset() rather than
_bdev_nvme_reset_start() and store bio into nvme_bdev_ctrlr->reset_bio
after _bdev_nvme_reset() returns zero.

Then inline _bdev_nvme_reset_start() into _bdev_nvme_reset() because
_bdev_nvme_reset_start() has only a single caller now.

The following patches will introduce a subsystem, and bdev_nvme_reset()
will reset all controllers of a subsystem sequentially. These further
clean-ups will be helpful for that enhancement.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I25abaa5be511c967ce20c92e4caa7dfeb3e09dd1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7240
Community-CI: Broadcom CI
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Shuhei Matsumoto 2021-04-05 21:58:55 +09:00 committed by Tomasz Zawadzki
parent 6d573781b5
commit 888def105f

View File

@ -564,7 +564,7 @@ _bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
}
static int
_bdev_nvme_reset_start(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
_bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
{
pthread_mutex_lock(&nvme_bdev_ctrlr->mutex);
if (nvme_bdev_ctrlr->destruct) {
@ -579,44 +579,27 @@ _bdev_nvme_reset_start(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
}
nvme_bdev_ctrlr->resetting = true;
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex);
/* First, delete all NVMe I/O queue pairs. */
spdk_for_each_channel(nvme_bdev_ctrlr,
_bdev_nvme_reset_destroy_qpair,
nvme_bdev_ctrlr,
_bdev_nvme_reset_ctrlr);
return 0;
}
static int
_bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
{
int rc;
rc = _bdev_nvme_reset_start(nvme_bdev_ctrlr);
if (rc == 0) {
/* First, delete all NVMe I/O queue pairs. */
spdk_for_each_channel(nvme_bdev_ctrlr,
_bdev_nvme_reset_destroy_qpair,
nvme_bdev_ctrlr,
_bdev_nvme_reset_ctrlr);
}
return rc;
}
static int
bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio)
{
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
int rc;
rc = _bdev_nvme_reset_start(nvme_ch->ctrlr);
rc = _bdev_nvme_reset(nvme_ch->ctrlr);
if (rc == 0) {
assert(nvme_ch->ctrlr->reset_bio == NULL);
nvme_ch->ctrlr->reset_bio = bio;
/* First, delete all NVMe I/O queue pairs. */
spdk_for_each_channel(nvme_ch->ctrlr,
_bdev_nvme_reset_destroy_qpair,
nvme_ch->ctrlr,
_bdev_nvme_reset_ctrlr);
} else if (rc == -EBUSY) {
/* Don't bother resetting if the controller is in the process of being destructed. */
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);