bdev/nvme: Rename nvme_io_path to nvme_ctrlr_channel

We will name lower-level objects with the nvme_* prefix and
upper-level objects with the nvme_bdev_* prefix.

This object is a per-ctrlr channel, and another new channel will be
added on top of it.

Rename nvme_io_path to nvme_ctrlr_channel, following the new naming rule.

The name nvme_io_path will be reused for a new object that selects the
optimal I/O path and resets multiple ctrlrs sequentially once
multipath is supported.
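
For reference, a minimal sketch of the layering after this rename. The
nvme_ctrlr_channel fields are taken from the diff below; the upper-level
channel is only a hypothetical illustration of the planned layering, not
code from this commit:

	/* Lower-level, per-ctrlr channel (renamed in this commit): nvme_* prefix. */
	struct nvme_ctrlr_channel {
		struct nvme_bdev_ctrlr		*ctrlr;
		struct spdk_nvme_qpair		*qpair;	/* NULL while the ctrlr is resetting */
		struct nvme_bdev_poll_group	*group;
		TAILQ_HEAD(, spdk_bdev_io)	pending_resets;
		struct ocssd_io_channel		*ocssd_ch;
	};

	/* Hypothetical upper-level channel (nvme_bdev_* prefix) to be added on
	 * top later. Once multipath is supported, a new nvme_io_path object
	 * would live at this level to pick the optimal path among several
	 * ctrlr channels. */
	struct nvme_bdev_channel {
		struct nvme_ctrlr_channel	*ctrlr_ch;
	};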

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I1d4fa6d4625de3413d629a1ff412e00de12dfaf4
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8378
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Shuhei Matsumoto 2021-07-07 10:02:14 +09:00 committed by Tomasz Zawadzki
parent efbd101b8b
commit c710c9acbe
7 changed files with 162 additions and 162 deletions
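
For orientation before the diff: the renamed callbacks plug into the
standard SPDK io_device pattern. A sketch assembled from calls that appear
in the diff (nvme_bdev_ctrlr is the io_device; each SPDK thread gets its
own per-ctrlr channel):

	spdk_io_device_register(nvme_bdev_ctrlr,
				bdev_nvme_create_ctrlr_channel_cb,	/* ctx_buf is a struct nvme_ctrlr_channel */
				bdev_nvme_destroy_ctrlr_channel_cb,
				sizeof(struct nvme_ctrlr_channel),
				nvme_bdev_ctrlr->name);

	/* On any thread that does I/O: */
	struct spdk_io_channel *ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);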

View File

@@ -176,7 +176,7 @@ static int bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
 static int bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				     struct nvme_bdev_io *bio, uint64_t zone_id,
				     enum spdk_bdev_zone_action action);
-static int bdev_nvme_admin_passthru(struct nvme_io_path *io_path,
+static int bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch,
				    struct nvme_bdev_io *bio,
				    struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
 static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
@@ -185,9 +185,9 @@ static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
 static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    struct nvme_bdev_io *bio,
				    struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
-static int bdev_nvme_abort(struct nvme_io_path *io_path,
+static int bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch,
			   struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
-static int bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io);
+static int bdev_nvme_reset(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_io);
 static int bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove);
 static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
@@ -223,15 +223,15 @@ static config_json_namespace_fn g_config_json_namespace_fn[] = {
 };
 struct spdk_nvme_qpair *
-bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch)
+bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch)
 {
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
-	assert(io_path_ch != NULL);
+	assert(ctrlr_io_ch != NULL);
-	io_path = spdk_io_channel_get_ctx(io_path_ch);
+	ctrlr_ch = spdk_io_channel_get_ctx(ctrlr_io_ch);
-	return io_path->qpair;
+	return ctrlr_ch->qpair;
 }
 static int
@@ -252,24 +252,24 @@ static struct spdk_bdev_module nvme_if = {
 SPDK_BDEV_MODULE_REGISTER(nvme, &nvme_if)
 static inline bool
-bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_path *io_path,
+bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_ctrlr_channel *ctrlr_ch,
		       struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
 {
-	if (spdk_unlikely(io_path->qpair == NULL)) {
+	if (spdk_unlikely(ctrlr_ch->qpair == NULL)) {
		/* The device is currently resetting. */
		return false;
	}
	*_ns = nbdev->nvme_ns->ns;
-	*_qpair = io_path->qpair;
+	*_qpair = ctrlr_ch->qpair;
	return true;
 }
 static inline bool
-bdev_nvme_find_admin_path(struct nvme_io_path *io_path,
+bdev_nvme_find_admin_path(struct nvme_ctrlr_channel *ctrlr_ch,
			  struct nvme_bdev_ctrlr **_nvme_bdev_ctrlr)
 {
-	*_nvme_bdev_ctrlr = io_path->ctrlr;
+	*_nvme_bdev_ctrlr = ctrlr_ch->ctrlr;
	return true;
 }
@@ -390,9 +390,9 @@ bdev_nvme_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 }
 static int
-bdev_nvme_create_qpair(struct nvme_io_path *io_path)
+bdev_nvme_create_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
 {
-	struct spdk_nvme_ctrlr *ctrlr = io_path->ctrlr->ctrlr;
+	struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->ctrlr->ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	int rc;
@@ -408,9 +408,9 @@ bdev_nvme_create_qpair(struct nvme_io_path *io_path)
		return -1;
	}
-	assert(io_path->group != NULL);
+	assert(ctrlr_ch->group != NULL);
-	rc = spdk_nvme_poll_group_add(io_path->group->group, qpair);
+	rc = spdk_nvme_poll_group_add(ctrlr_ch->group->group, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
		goto err;
@@ -422,7 +422,7 @@ bdev_nvme_create_qpair(struct nvme_io_path *io_path)
		goto err;
	}
-	io_path->qpair = qpair;
+	ctrlr_ch->qpair = qpair;
	return 0;
@@ -433,11 +433,11 @@ err:
 }
 static void
-bdev_nvme_destroy_qpair(struct nvme_io_path *io_path)
+bdev_nvme_destroy_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
 {
-	if (io_path->qpair != NULL) {
+	if (ctrlr_ch->qpair != NULL) {
-		spdk_nvme_ctrlr_free_io_qpair(io_path->qpair);
+		spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch->qpair);
-		io_path->qpair = NULL;
+		ctrlr_ch->qpair = NULL;
	}
 }
@@ -465,14 +465,14 @@ bdev_nvme_check_pending_destruct(struct spdk_io_channel_iter *i, int status)
 }
 static void
-_bdev_nvme_complete_pending_resets(struct nvme_io_path *io_path,
+_bdev_nvme_complete_pending_resets(struct nvme_ctrlr_channel *ctrlr_ch,
				   enum spdk_bdev_io_status status)
 {
	struct spdk_bdev_io *bdev_io;
-	while (!TAILQ_EMPTY(&io_path->pending_resets)) {
+	while (!TAILQ_EMPTY(&ctrlr_ch->pending_resets)) {
-		bdev_io = TAILQ_FIRST(&io_path->pending_resets);
+		bdev_io = TAILQ_FIRST(&ctrlr_ch->pending_resets);
-		TAILQ_REMOVE(&io_path->pending_resets, bdev_io, module_link);
+		TAILQ_REMOVE(&ctrlr_ch->pending_resets, bdev_io, module_link);
		spdk_bdev_io_complete(bdev_io, status);
	}
 }
@@ -481,9 +481,9 @@ static void
 bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
 {
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
-	_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_SUCCESS);
+	_bdev_nvme_complete_pending_resets(ctrlr_ch, SPDK_BDEV_IO_STATUS_SUCCESS);
	spdk_for_each_channel_continue(i, 0);
 }
@@ -492,9 +492,9 @@ static void
 bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i)
 {
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
-	_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_FAILED);
+	_bdev_nvme_complete_pending_resets(ctrlr_ch, SPDK_BDEV_IO_STATUS_FAILED);
	spdk_for_each_channel_continue(i, 0);
 }
@@ -574,10 +574,10 @@ static void
 _bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i)
 {
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
	int rc;
-	rc = bdev_nvme_create_qpair(io_path);
+	rc = bdev_nvme_create_qpair(ctrlr_ch);
	spdk_for_each_channel_continue(i, rc);
 }
@@ -613,9 +613,9 @@ static void
 _bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
 {
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	bdev_nvme_destroy_qpair(io_path);
+	bdev_nvme_destroy_qpair(ctrlr_ch);
	spdk_for_each_channel_continue(i, 0);
 }
@@ -647,21 +647,21 @@ _bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
 }
 static int
-bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io)
+bdev_nvme_reset(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_io)
 {
	int rc;
-	rc = _bdev_nvme_reset(io_path->ctrlr);
+	rc = _bdev_nvme_reset(ctrlr_ch->ctrlr);
	if (rc == 0) {
-		assert(io_path->ctrlr->reset_bdev_io == NULL);
+		assert(ctrlr_ch->ctrlr->reset_bdev_io == NULL);
-		io_path->ctrlr->reset_bdev_io = bdev_io;
+		ctrlr_ch->ctrlr->reset_bdev_io = bdev_io;
	} else if (rc == -EAGAIN) {
		/*
		 * Reset call is queued only if it is from the app framework. This is on purpose so that
		 * we don't interfere with the app framework reset strategy. i.e. we are deferring to the
		 * upper level. If they are in the middle of a reset, we won't try to schedule another one.
		 */
-		TAILQ_INSERT_TAIL(&io_path->pending_resets, bdev_io, module_link);
+		TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bdev_io, module_link);
	} else {
		return rc;
	}
@@ -765,7 +765,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int ret;
@@ -775,7 +775,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		goto exit;
	}
-	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair))) {
+	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
		ret = -ENXIO;
		goto exit;
	}
@@ -799,7 +799,7 @@ exit:
 static void
 bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
@@ -808,7 +808,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
	struct spdk_nvme_qpair *qpair;
	int rc = 0;
-	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair))) {
+	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
		rc = -ENXIO;
		goto exit;
	}
@@ -880,7 +880,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
				      bdev_io->u.bdev.num_blocks);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
-		rc = bdev_nvme_reset(io_path, bdev_io);
+		rc = bdev_nvme_reset(ctrlr_ch, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = bdev_nvme_flush(ns,
@@ -916,7 +916,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
					      bdev_io->u.zone_mgmt.zone_action);
		break;
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
-		rc = bdev_nvme_admin_passthru(io_path,
+		rc = bdev_nvme_admin_passthru(ctrlr_ch,
					      nbdev_io,
					      &bdev_io->u.nvme_passthru.cmd,
					      bdev_io->u.nvme_passthru.buf,
@@ -942,7 +942,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		nbdev_io_to_abort = (struct nvme_bdev_io *)bdev_io->u.abort.bio_to_abort->driver_ctx;
-		rc = bdev_nvme_abort(io_path,
+		rc = bdev_nvme_abort(ctrlr_ch,
				     nbdev_io,
				     nbdev_io_to_abort);
		break;
@@ -1016,10 +1016,10 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
 }
 static int
-bdev_nvme_create_path_cb(void *io_device, void *ctx_buf)
+bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf)
 {
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device;
-	struct nvme_io_path *io_path = ctx_buf;
+	struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf;
	struct spdk_io_channel *pg_ch;
	int rc;
@@ -1028,26 +1028,26 @@ bdev_nvme_create_path_cb(void *io_device, void *ctx_buf)
		return -1;
	}
-	io_path->group = spdk_io_channel_get_ctx(pg_ch);
+	ctrlr_ch->group = spdk_io_channel_get_ctx(pg_ch);
 #ifdef SPDK_CONFIG_VTUNE
-	io_path->group->collect_spin_stat = true;
+	ctrlr_ch->group->collect_spin_stat = true;
 #else
-	io_path->group->collect_spin_stat = false;
+	ctrlr_ch->group->collect_spin_stat = false;
 #endif
-	TAILQ_INIT(&io_path->pending_resets);
+	TAILQ_INIT(&ctrlr_ch->pending_resets);
	if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) {
-		rc = bdev_ocssd_create_io_channel(io_path);
+		rc = bdev_ocssd_create_io_channel(ctrlr_ch);
		if (rc != 0) {
			goto err_ocssd_ch;
		}
	}
-	io_path->ctrlr = nvme_bdev_ctrlr;
+	ctrlr_ch->ctrlr = nvme_bdev_ctrlr;
-	rc = bdev_nvme_create_qpair(io_path);
+	rc = bdev_nvme_create_qpair(ctrlr_ch);
	if (rc != 0) {
		goto err_qpair;
	}
@@ -1055,8 +1055,8 @@ bdev_nvme_create_path_cb(void *io_device, void *ctx_buf)
	return 0;
 err_qpair:
-	if (io_path->ocssd_ch) {
+	if (ctrlr_ch->ocssd_ch) {
-		bdev_ocssd_destroy_io_channel(io_path);
+		bdev_ocssd_destroy_io_channel(ctrlr_ch);
	}
 err_ocssd_ch:
	spdk_put_io_channel(pg_ch);
@@ -1065,19 +1065,19 @@ err_ocssd_ch:
 }
 static void
-bdev_nvme_destroy_path_cb(void *io_device, void *ctx_buf)
+bdev_nvme_destroy_ctrlr_channel_cb(void *io_device, void *ctx_buf)
 {
-	struct nvme_io_path *io_path = ctx_buf;
+	struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf;
-	assert(io_path->group != NULL);
+	assert(ctrlr_ch->group != NULL);
-	if (io_path->ocssd_ch != NULL) {
+	if (ctrlr_ch->ocssd_ch != NULL) {
-		bdev_ocssd_destroy_io_channel(io_path);
+		bdev_ocssd_destroy_io_channel(ctrlr_ch);
	}
-	bdev_nvme_destroy_qpair(io_path);
+	bdev_nvme_destroy_qpair(ctrlr_ch);
-	spdk_put_io_channel(spdk_io_channel_from_ctx(io_path->group));
+	spdk_put_io_channel(spdk_io_channel_from_ctx(ctrlr_ch->group));
 }
 static void
@@ -1314,8 +1314,8 @@ bdev_nvme_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *
 static uint64_t
 bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	struct nvme_bdev_poll_group *group = io_path->group;
+	struct nvme_bdev_poll_group *group = ctrlr_ch->group;
	uint64_t spin_time;
	if (!group || !group->collect_spin_stat) {
@@ -1808,9 +1808,9 @@ nvme_bdev_ctrlr_create_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
			    struct nvme_async_probe_ctx *ctx)
 {
	spdk_io_device_register(nvme_bdev_ctrlr,
-				bdev_nvme_create_path_cb,
+				bdev_nvme_create_ctrlr_channel_cb,
-				bdev_nvme_destroy_path_cb,
+				bdev_nvme_destroy_ctrlr_channel_cb,
-				sizeof(struct nvme_io_path),
+				sizeof(struct nvme_ctrlr_channel),
				nvme_bdev_ctrlr->name);
	nvme_ctrlr_populate_namespaces(nvme_bdev_ctrlr, ctx);
@@ -2578,7 +2578,7 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
	struct nvme_bdev_io *bio = ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int ret;
@@ -2590,9 +2590,9 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
	/* Save completion status to use after verifying PI error. */
	bio->cpl = *cpl;
-	io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
+	ctrlr_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
-	if (spdk_likely(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair))) {
+	if (spdk_likely(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
		/* Read without PI checking to verify PI error. */
		ret = bdev_nvme_no_pi_readv(ns,
					    qpair,
@@ -2741,7 +2741,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;
	uint32_t zones_to_copy = bdev_io->u.zone_mgmt.num_zones;
	struct spdk_bdev_zone_info *info = bdev_io->u.zone_mgmt.buf;
@@ -2755,7 +2755,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
		goto out_complete_io_nvme_cpl;
	}
-	if (!bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair)) {
+	if (!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair)) {
		ret = -ENXIO;
		goto out_complete_io_ret;
	}
@@ -3284,13 +3284,13 @@ bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
 }
 static int
-bdev_nvme_admin_passthru(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
+bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio,
			 struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
 {
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	uint32_t max_xfer_size;
-	if (!bdev_nvme_find_admin_path(io_path, &nvme_bdev_ctrlr)) {
+	if (!bdev_nvme_find_admin_path(ctrlr_ch, &nvme_bdev_ctrlr)) {
		return -EINVAL;
	}
@@ -3360,22 +3360,22 @@ bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 }
 static int
-bdev_nvme_abort(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
+bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio,
		struct nvme_bdev_io *bio_to_abort)
 {
	int rc;
	bio->orig_thread = spdk_get_thread();
-	rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
+	rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr_ch->ctrlr->ctrlr,
-					   io_path->qpair,
+					   ctrlr_ch->qpair,
					   bio_to_abort,
					   bdev_nvme_abort_done, bio);
	if (rc == -ENOENT) {
		/* If no command was found in I/O qpair, the target command may be
		 * admin command.
		 */
-		rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
+		rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr_ch->ctrlr->ctrlr,
						   NULL,
						   bio_to_abort,
						   bdev_nvme_abort_done, bio);

View File

@@ -63,7 +63,7 @@ struct spdk_bdev_nvme_opts {
	bool delay_cmd_submit;
 };
-struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch);
+struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch);
 void bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts);
 int bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts);
 int bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx);

View File

@@ -106,18 +106,18 @@ bdev_ocssd_get_ns_from_nvme(struct nvme_bdev_ns *nvme_ns)
 }
 static inline bool
-bdev_ocssd_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_path *io_path,
+bdev_ocssd_find_io_path(struct nvme_bdev *nbdev, struct nvme_ctrlr_channel *ctrlr_ch,
			struct bdev_ocssd_ns **_ocssd_ns,
			struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
 {
-	if (spdk_unlikely(io_path->qpair == NULL)) {
+	if (spdk_unlikely(ctrlr_ch->qpair == NULL)) {
		/* The device is currently resetting. */
		return false;
	}
	*_ocssd_ns = bdev_ocssd_get_ns_from_nvme(nbdev->nvme_ns);
	*_ns = nbdev->nvme_ns->ns;
-	*_qpair = io_path->qpair;
+	*_qpair = ctrlr_ch->qpair;
	return true;
 }
@@ -527,7 +527,7 @@ static void
 bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
 {
	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct bdev_ocssd_ns *ocssd_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
@@ -538,7 +538,7 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
		return;
	}
-	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
+	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch,
						   &ocssd_ns, &ns, &qpair))) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
@@ -653,7 +653,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
	struct spdk_ocssd_chunk_information_entry *chunk_info = &ocdev_io->zone_info.chunk_info;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(ctx);
	struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	struct bdev_ocssd_ns *ocssd_ns;
@@ -665,9 +665,9 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
		return;
	}
-	io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
+	ctrlr_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
-	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path, &ocssd_ns, &ns,
+	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch, &ocssd_ns, &ns,
						   &qpair))) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
@@ -757,14 +757,14 @@ static void bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bd
 static int
 bdev_ocssd_poll_pending(void *ctx)
 {
-	struct nvme_io_path *io_path = ctx;
+	struct nvme_ctrlr_channel *ctrlr_ch = ctx;
	struct ocssd_io_channel *ocssd_ch;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	TAILQ_HEAD(, spdk_bdev_io) pending_requests;
	int num_requests = 0;
-	ocssd_ch = io_path->ocssd_ch;
+	ocssd_ch = ctrlr_ch->ocssd_ch;
	TAILQ_INIT(&pending_requests);
	TAILQ_SWAP(&ocssd_ch->pending_requests, &pending_requests, spdk_bdev_io, module_link);
@@ -786,8 +786,8 @@ bdev_ocssd_poll_pending(void *ctx)
 static void
 bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	struct ocssd_io_channel *ocssd_ch = io_path->ocssd_ch;
+	struct ocssd_io_channel *ocssd_ch = ctrlr_ch->ocssd_ch;
	TAILQ_INSERT_TAIL(&ocssd_ch->pending_requests, bdev_io, module_link);
	spdk_poller_resume(ocssd_ch->pending_poller);
@@ -796,14 +796,14 @@ bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 static int
 _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
	struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
	struct bdev_ocssd_ns *ocssd_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
-	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
+	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch,
						   &ocssd_ns, &ns, &qpair))) {
		return -1;
	}
@@ -1517,7 +1517,7 @@ bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns)
 }
 int
-bdev_ocssd_create_io_channel(struct nvme_io_path *io_path)
+bdev_ocssd_create_io_channel(struct nvme_ctrlr_channel *ctrlr_ch)
 {
	struct ocssd_io_channel *ocssd_ch;
@@ -1526,7 +1526,7 @@ bdev_ocssd_create_io_channel(struct nvme_io_path *io_path)
		return -ENOMEM;
	}
-	ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, io_path, 0);
+	ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, ctrlr_ch, 0);
	if (ocssd_ch->pending_poller == NULL) {
		SPDK_ERRLOG("Failed to register pending requests poller\n");
		free(ocssd_ch);
@@ -1537,16 +1537,16 @@ bdev_ocssd_create_io_channel(struct nvme_io_path *io_path)
	spdk_poller_pause(ocssd_ch->pending_poller);
	TAILQ_INIT(&ocssd_ch->pending_requests);
-	io_path->ocssd_ch = ocssd_ch;
+	ctrlr_ch->ocssd_ch = ocssd_ch;
	return 0;
 }
 void
-bdev_ocssd_destroy_io_channel(struct nvme_io_path *io_path)
+bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ctrlr_ch)
 {
-	spdk_poller_unregister(&io_path->ocssd_ch->pending_poller);
+	spdk_poller_unregister(&ctrlr_ch->ocssd_ch->pending_poller);
-	free(io_path->ocssd_ch);
+	free(ctrlr_ch->ocssd_ch);
 }
 int

View File

@@ -50,8 +50,8 @@ void bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
 void bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns);
 void bdev_ocssd_namespace_config_json(struct spdk_json_write_ctx *w, struct nvme_bdev_ns *nvme_ns);
-int bdev_ocssd_create_io_channel(struct nvme_io_path *ioch);
+int bdev_ocssd_create_io_channel(struct nvme_ctrlr_channel *ioch);
-void bdev_ocssd_destroy_io_channel(struct nvme_io_path *ioch);
+void bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ioch);
 int bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
 void bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);

View File

@@ -154,7 +154,7 @@ struct nvme_async_probe_ctx {
 struct ocssd_io_channel;
-struct nvme_io_path {
+struct nvme_ctrlr_channel {
	struct nvme_bdev_ctrlr		*ctrlr;
	struct spdk_nvme_qpair		*qpair;
	struct nvme_bdev_poll_group	*group;

View File

@@ -208,9 +208,9 @@ DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_bdev_ns *nvme_ns));
 DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
		struct nvme_bdev_ns *nvme_ns));
-DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_io_path *ioch), 0);
+DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_ctrlr_channel *ioch), 0);
-DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_io_path *ioch));
+DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_ctrlr_channel *ioch));
 DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0);
@@ -1030,7 +1030,7 @@ test_reset_ctrlr(void)
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct nvme_bdev_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
-	struct nvme_io_path *io_path1, *io_path2;
+	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;
	ut_init_trid(&trid);
@@ -1050,16 +1050,16 @@ test_reset_ctrlr(void)
	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
-	io_path1 = spdk_io_channel_get_ctx(ch1);
+	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
-	CU_ASSERT(io_path1->qpair != NULL);
+	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	set_thread(1);
	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
-	io_path2 = spdk_io_channel_get_ctx(ch2);
+	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
-	CU_ASSERT(io_path2->qpair != NULL);
+	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	/* Reset starts from thread 1. */
	set_thread(1);
@@ -1085,28 +1085,28 @@ test_reset_ctrlr(void)
	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
-	CU_ASSERT(io_path1->qpair != NULL);
+	CU_ASSERT(ctrlr_ch1->qpair != NULL);
-	CU_ASSERT(io_path2->qpair != NULL);
+	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	poll_thread_times(0, 1);
-	CU_ASSERT(io_path1->qpair == NULL);
+	CU_ASSERT(ctrlr_ch1->qpair == NULL);
-	CU_ASSERT(io_path2->qpair != NULL);
+	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	poll_thread_times(1, 1);
-	CU_ASSERT(io_path1->qpair == NULL);
+	CU_ASSERT(ctrlr_ch1->qpair == NULL);
-	CU_ASSERT(io_path2->qpair == NULL);
+	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	poll_thread_times(0, 1);
-	CU_ASSERT(io_path1->qpair != NULL);
+	CU_ASSERT(ctrlr_ch1->qpair != NULL);
-	CU_ASSERT(io_path2->qpair == NULL);
+	CU_ASSERT(ctrlr_ch2->qpair == NULL);
	poll_thread_times(1, 1);
-	CU_ASSERT(io_path1->qpair != NULL);
+	CU_ASSERT(ctrlr_ch1->qpair != NULL);
-	CU_ASSERT(io_path2->qpair != NULL);
+	CU_ASSERT(ctrlr_ch2->qpair != NULL);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);
@@ -1362,7 +1362,7 @@ test_pending_reset(void)
	const char *attached_names[STRING_SIZE];
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
-	struct nvme_io_path *io_path1, *io_path2;
+	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;
	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
@@ -1397,28 +1397,28 @@ test_pending_reset(void)
	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
-	io_path1 = spdk_io_channel_get_ctx(ch1);
+	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	set_thread(1);
	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
-	io_path2 = spdk_io_channel_get_ctx(ch2);
+	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
-	rc = bdev_nvme_reset(io_path2, first_bdev_io);
+	rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
-	CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));
+	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
	set_thread(0);
-	rc = bdev_nvme_reset(io_path1, second_bdev_io);
+	rc = bdev_nvme_reset(ctrlr_ch1, second_bdev_io);
	CU_ASSERT(rc == 0);
-	CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);
+	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
	poll_threads();
@@ -1434,16 +1434,16 @@ test_pending_reset(void)
	 */
	set_thread(1);
-	rc = bdev_nvme_reset(io_path2, first_bdev_io);
+	rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
-	CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));
+	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
	set_thread(0);
-	rc = bdev_nvme_reset(io_path1, second_bdev_io);
+	rc = bdev_nvme_reset(ctrlr_ch1, second_bdev_io);
	CU_ASSERT(rc == 0);
-	CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);
+	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);
	ctrlr->fail_reset = true;
@@ -1609,7 +1609,7 @@ test_reconnect_qpair(void)
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct spdk_io_channel *ch;
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;
	set_thread(0);
@@ -1626,26 +1626,26 @@ test_reconnect_qpair(void)
	ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
-	io_path = spdk_io_channel_get_ctx(ch);
+	ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	CU_ASSERT(io_path->qpair != NULL);
+	CU_ASSERT(ctrlr_ch->qpair != NULL);
-	CU_ASSERT(io_path->group != NULL);
+	CU_ASSERT(ctrlr_ch->group != NULL);
-	CU_ASSERT(io_path->group->group != NULL);
+	CU_ASSERT(ctrlr_ch->group->group != NULL);
-	CU_ASSERT(io_path->group->poller != NULL);
+	CU_ASSERT(ctrlr_ch->group->poller != NULL);
	/* Test if the disconnected qpair is reconnected. */
-	io_path->qpair->is_connected = false;
+	ctrlr_ch->qpair->is_connected = false;
	poll_threads();
-	CU_ASSERT(io_path->qpair->is_connected == true);
+	CU_ASSERT(ctrlr_ch->qpair->is_connected == true);
	/* If the ctrlr is failed, reconnecting qpair should fail too. */
-	io_path->qpair->is_connected = false;
+	ctrlr_ch->qpair->is_connected = false;
	ctrlr.is_failed = true;
	poll_threads();
-	CU_ASSERT(io_path->qpair->is_connected == false);
+	CU_ASSERT(ctrlr_ch->qpair->is_connected == false);
	spdk_put_io_channel(ch);
@@ -1740,12 +1740,12 @@ static void
 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;
-	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));
+	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;
@@ -1766,12 +1766,12 @@ static void
 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;
-	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));
+	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;
@@ -1786,14 +1786,14 @@ ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
 static void
 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
+	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;
-	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));
+	CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
@@ -1806,7 +1806,7 @@ ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *b
	CU_ASSERT(bio->first_fused_submitted == true);
	/* First outstanding request is compare operation. */
-	req = TAILQ_FIRST(&io_path->qpair->outstanding_reqs);
+	req = TAILQ_FIRST(&ctrlr_ch->qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
@@ -2020,7 +2020,7 @@ test_abort(void)
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
-	struct nvme_io_path *io_path1;
+	struct nvme_ctrlr_channel *ctrlr_ch1;
	int rc;
	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
@@ -2072,7 +2072,7 @@ test_abort(void)
	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
-	io_path1 = spdk_io_channel_get_ctx(ch1);
+	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	set_thread(1);
@@ -2126,7 +2126,7 @@ test_abort(void)
	bdev_nvme_submit_request(ch1, write_io);
	CU_ASSERT(write_io->internal.in_submit_request == true);
-	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 1);
+	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
@@ -2142,7 +2142,7 @@ test_abort(void)
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
-	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 0);
+	CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
@@ -2198,7 +2198,7 @@ test_get_io_qpair(void)
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct spdk_io_channel *ch;
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;
@@ -2215,11 +2215,11 @@ test_get_io_qpair(void)
	ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
-	io_path = spdk_io_channel_get_ctx(ch);
+	ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	CU_ASSERT(io_path->qpair != NULL);
+	CU_ASSERT(ctrlr_ch->qpair != NULL);
	qpair = bdev_nvme_get_io_qpair(ch);
-	CU_ASSERT(qpair == io_path->qpair);
+	CU_ASSERT(qpair == ctrlr_ch->qpair);
	spdk_put_io_channel(ch);

View File

@@ -885,7 +885,7 @@ test_get_zone_info(void)
	struct spdk_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
-	struct nvme_io_path *io_path;
+	struct nvme_ctrlr_channel *ctrlr_ch;
 #define MAX_ZONE_INFO_COUNT 64
	struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
	struct spdk_ocssd_chunk_information_entry *chunk_info;
@@ -915,12 +915,12 @@ test_get_zone_info(void)
	bdev = spdk_bdev_get_by_name(bdev_name);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
-	ch = calloc(1, sizeof(*ch) + sizeof(*io_path));
+	ch = calloc(1, sizeof(*ch) + sizeof(*ctrlr_ch));
	SPDK_CU_ASSERT_FATAL(ch != NULL);
-	io_path = spdk_io_channel_get_ctx(ch);
+	ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	io_path->ctrlr = nvme_bdev_ctrlr;
+	ctrlr_ch->ctrlr = nvme_bdev_ctrlr;
-	io_path->qpair = (struct spdk_nvme_qpair *)0x1;
+	ctrlr_ch->qpair = (struct spdk_nvme_qpair *)0x1;
	bdev_io = alloc_ocssd_io();
	bdev_io->internal.cb = get_zone_info_cb;