bdev/nvme: Revert reset_io to bring nvme_bdev_io as context
When we support multipath, reset_io will hold the controller currently being reset to reset all underlying controllers sequentially. bdev_nvme_submit_request() basically passes nvme_bdev_io to each I/O type, and we have a convenient helper function bdev_nvme_io_complete() which has nvme_bdev_io as a parameter. So revert the previous change to bring nvme_bdev_io as context for reset processing. Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Change-Id: I19697e8252505bab519a42889d1a88d967932f22 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8586 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com> Community-CI: Mellanox Build Bot Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
parent
07df4b0141
commit
c213f1b452
@ -187,7 +187,7 @@ static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qp
|
|||||||
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
|
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
|
||||||
static int bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch,
|
static int bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch,
|
||||||
struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
|
struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
|
||||||
static int bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_io);
|
static int bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio);
|
||||||
static int bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove);
|
static int bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove);
|
||||||
static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
|
static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
|
||||||
|
|
||||||
@ -501,15 +501,9 @@ bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i)
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
bdev_nvme_reset_io_complete(struct nvme_ctrlr *nvme_ctrlr,
|
bdev_nvme_reset_io_complete(struct nvme_ctrlr *nvme_ctrlr,
|
||||||
struct spdk_bdev_io *bdev_io, int rc)
|
struct nvme_bdev_io *bio, int rc)
|
||||||
{
|
{
|
||||||
enum spdk_bdev_io_status io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
|
bdev_nvme_io_complete(bio, rc);
|
||||||
|
|
||||||
if (rc) {
|
|
||||||
io_status = SPDK_BDEV_IO_STATUS_FAILED;
|
|
||||||
}
|
|
||||||
|
|
||||||
spdk_bdev_io_complete(bdev_io, io_status);
|
|
||||||
|
|
||||||
/* Make sure we clear any pending resets before returning. */
|
/* Make sure we clear any pending resets before returning. */
|
||||||
spdk_for_each_channel(nvme_ctrlr,
|
spdk_for_each_channel(nvme_ctrlr,
|
||||||
@ -523,9 +517,9 @@ static void
|
|||||||
bdev_nvme_reset_complete(struct nvme_ctrlr *nvme_ctrlr, int rc)
|
bdev_nvme_reset_complete(struct nvme_ctrlr *nvme_ctrlr, int rc)
|
||||||
{
|
{
|
||||||
struct nvme_ctrlr_trid *curr_trid;
|
struct nvme_ctrlr_trid *curr_trid;
|
||||||
struct spdk_bdev_io *bdev_io = nvme_ctrlr->reset_bdev_io;
|
struct nvme_bdev_io *bio = nvme_ctrlr->reset_bio;
|
||||||
|
|
||||||
nvme_ctrlr->reset_bdev_io = NULL;
|
nvme_ctrlr->reset_bio = NULL;
|
||||||
|
|
||||||
if (rc) {
|
if (rc) {
|
||||||
SPDK_ERRLOG("Resetting controller failed.\n");
|
SPDK_ERRLOG("Resetting controller failed.\n");
|
||||||
@ -550,8 +544,8 @@ bdev_nvme_reset_complete(struct nvme_ctrlr *nvme_ctrlr, int rc)
|
|||||||
|
|
||||||
pthread_mutex_unlock(&nvme_ctrlr->mutex);
|
pthread_mutex_unlock(&nvme_ctrlr->mutex);
|
||||||
|
|
||||||
if (bdev_io) {
|
if (bio) {
|
||||||
bdev_nvme_reset_io_complete(nvme_ctrlr, bdev_io, rc);
|
bdev_nvme_reset_io_complete(nvme_ctrlr, bio, rc);
|
||||||
} else {
|
} else {
|
||||||
/* Make sure we clear any pending resets before returning. */
|
/* Make sure we clear any pending resets before returning. */
|
||||||
spdk_for_each_channel(nvme_ctrlr,
|
spdk_for_each_channel(nvme_ctrlr,
|
||||||
@ -647,20 +641,22 @@ bdev_nvme_reset(struct nvme_ctrlr *nvme_ctrlr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_io)
|
bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio)
|
||||||
{
|
{
|
||||||
|
struct spdk_bdev_io *bdev_io;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
rc = bdev_nvme_reset(ctrlr_ch->ctrlr);
|
rc = bdev_nvme_reset(ctrlr_ch->ctrlr);
|
||||||
if (rc == 0) {
|
if (rc == 0) {
|
||||||
assert(ctrlr_ch->ctrlr->reset_bdev_io == NULL);
|
assert(ctrlr_ch->ctrlr->reset_bio == NULL);
|
||||||
ctrlr_ch->ctrlr->reset_bdev_io = bdev_io;
|
ctrlr_ch->ctrlr->reset_bio = bio;
|
||||||
} else if (rc == -EAGAIN) {
|
} else if (rc == -EAGAIN) {
|
||||||
/*
|
/*
|
||||||
* Reset call is queued only if it is from the app framework. This is on purpose so that
|
* Reset call is queued only if it is from the app framework. This is on purpose so that
|
||||||
* we don't interfere with the app framework reset strategy. i.e. we are deferring to the
|
* we don't interfere with the app framework reset strategy. i.e. we are deferring to the
|
||||||
* upper level. If they are in the middle of a reset, we won't try to schedule another one.
|
* upper level. If they are in the middle of a reset, we won't try to schedule another one.
|
||||||
*/
|
*/
|
||||||
|
bdev_io = spdk_bdev_io_from_ctx(bio);
|
||||||
TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bdev_io, module_link);
|
TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bdev_io, module_link);
|
||||||
} else {
|
} else {
|
||||||
return rc;
|
return rc;
|
||||||
@ -880,7 +876,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
|||||||
bdev_io->u.bdev.num_blocks);
|
bdev_io->u.bdev.num_blocks);
|
||||||
break;
|
break;
|
||||||
case SPDK_BDEV_IO_TYPE_RESET:
|
case SPDK_BDEV_IO_TYPE_RESET:
|
||||||
rc = bdev_nvme_reset_io(ctrlr_ch, bdev_io);
|
rc = bdev_nvme_reset_io(ctrlr_ch, nbdev_io);
|
||||||
break;
|
break;
|
||||||
case SPDK_BDEV_IO_TYPE_FLUSH:
|
case SPDK_BDEV_IO_TYPE_FLUSH:
|
||||||
rc = bdev_nvme_flush(ns,
|
rc = bdev_nvme_flush(ns,
|
||||||
|
@ -68,6 +68,7 @@ struct nvme_ns {
|
|||||||
void *type_ctx;
|
void *type_ctx;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct nvme_bdev_io;
|
||||||
struct ocssd_bdev_ctrlr;
|
struct ocssd_bdev_ctrlr;
|
||||||
|
|
||||||
struct nvme_ctrlr_trid {
|
struct nvme_ctrlr_trid {
|
||||||
@ -107,7 +108,7 @@ struct nvme_ctrlr {
|
|||||||
|
|
||||||
struct ocssd_bdev_ctrlr *ocssd_ctrlr;
|
struct ocssd_bdev_ctrlr *ocssd_ctrlr;
|
||||||
|
|
||||||
struct spdk_bdev_io *reset_bdev_io;
|
struct nvme_bdev_io *reset_bio;
|
||||||
|
|
||||||
/** linked list pointer for device list */
|
/** linked list pointer for device list */
|
||||||
TAILQ_ENTRY(nvme_ctrlr) tailq;
|
TAILQ_ENTRY(nvme_ctrlr) tailq;
|
||||||
|
Loading…
Reference in New Issue
Block a user