bdev/nvme: Alloc qpair context dynamically on nvme_ctrlr_channel

This is another preparation for disconnecting qpairs asynchronously.

Add an nvme_qpair object and move the qpair and poll_group pointers and
the io_path_list from nvme_ctrlr_channel to nvme_qpair. nvme_qpair is
allocated dynamically when nvme_ctrlr_channel is created, and
nvme_ctrlr_channel points to nvme_qpair.

We want to keep the number of pointer dereferences on the I/O path
unchanged. Change nvme_io_path to point to nvme_qpair instead of
nvme_ctrlr_channel, and add an nvme_ctrlr_channel pointer to nvme_qpair.

nvme_ctrlr_channel may be freed earlier than nvme_qpair. Hence
nvme_poll_group now lists nvme_qpair instead of nvme_ctrlr_channel, and
nvme_qpair holds a pointer to nvme_ctrlr.

By using the nvme_ctrlr pointer of nvme_qpair, the helper function
nvme_ctrlr_channel_get_ctrlr() is no longer necessary. Remove it.
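
For orientation, a minimal C sketch of the resulting ownership graph,
condensed from the header diff below (simplified fields only;
io_path_get_nvme_qpair() is a hypothetical helper added purely for
illustration):

#include <sys/queue.h>

struct nvme_ctrlr;          /* opaque for this sketch */
struct spdk_nvme_qpair;     /* opaque for this sketch */
struct nvme_poll_group;     /* opaque for this sketch */
struct nvme_ctrlr_channel;
struct nvme_io_path;

struct nvme_qpair {
	struct nvme_ctrlr *ctrlr;             /* replaces nvme_ctrlr_channel_get_ctrlr() */
	struct spdk_nvme_qpair *qpair;        /* the underlying NVMe I/O queue pair */
	struct nvme_poll_group *group;        /* the poll group lists nvme_qpair... */
	struct nvme_ctrlr_channel *ctrlr_ch;  /* back-pointer; the channel may be freed first */
	TAILQ_HEAD(, nvme_io_path) io_path_list;
	TAILQ_ENTRY(nvme_qpair) tailq;        /* ...via this entry in group->qpair_list */
};

struct nvme_ctrlr_channel {
	struct nvme_qpair *qpair;             /* allocated dynamically in nvme_qpair_create() */
};

struct nvme_io_path {
	struct nvme_qpair *qpair;             /* was: struct nvme_ctrlr_channel *ctrlr_ch */
	TAILQ_ENTRY(nvme_io_path) tailq;      /* entry in nvme_qpair->io_path_list */
};

/* Accesses on the I/O path keep the same number of hops, e.g.: */
static inline struct spdk_nvme_qpair *
io_path_get_nvme_qpair(struct nvme_io_path *io_path)
{
	return io_path->qpair->qpair;         /* was: io_path->ctrlr_ch->qpair */
}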

Change-Id: Ib3f579d3441f31b9db7d3844ec56c49e2bb53a5d
Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11832
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Shuhei Matsumoto, 2022-03-09 15:44:18 +09:00; committed by Tomasz Zawadzki
Parent: c1b0b339cf
Commit: c113e4cdca
4 changed files with 284 additions and 245 deletions

module/bdev/nvme/bdev_nvme.c

@ -217,7 +217,7 @@ bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch)
ctrlr_ch = spdk_io_channel_get_ctx(ctrlr_io_ch);
return ctrlr_ch->qpair;
return ctrlr_ch->qpair->qpair;
}
static int
@ -580,6 +580,8 @@ _bdev_nvme_add_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_
{
struct nvme_io_path *io_path;
struct spdk_io_channel *ch;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
io_path = calloc(1, sizeof(*io_path));
if (io_path == NULL) {
@ -596,8 +598,13 @@ _bdev_nvme_add_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_
return -ENOMEM;
}
io_path->ctrlr_ch = spdk_io_channel_get_ctx(ch);
TAILQ_INSERT_TAIL(&io_path->ctrlr_ch->io_path_list, io_path, tailq);
ctrlr_ch = spdk_io_channel_get_ctx(ch);
nvme_qpair = ctrlr_ch->qpair;
assert(nvme_qpair != NULL);
io_path->qpair = nvme_qpair;
TAILQ_INSERT_TAIL(&nvme_qpair->io_path_list, io_path, tailq);
io_path->nbdev_ch = nbdev_ch;
STAILQ_INSERT_TAIL(&nbdev_ch->io_path_list, io_path, stailq);
@ -611,13 +618,22 @@ static void
_bdev_nvme_delete_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *io_path)
{
struct spdk_io_channel *ch;
struct nvme_qpair *nvme_qpair;
struct nvme_ctrlr_channel *ctrlr_ch;
nbdev_ch->current_io_path = NULL;
STAILQ_REMOVE(&nbdev_ch->io_path_list, io_path, nvme_io_path, stailq);
TAILQ_REMOVE(&io_path->ctrlr_ch->io_path_list, io_path, tailq);
ch = spdk_io_channel_from_ctx(io_path->ctrlr_ch);
nvme_qpair = io_path->qpair;
assert(nvme_qpair != NULL);
TAILQ_REMOVE(&nvme_qpair->io_path_list, io_path, tailq);
ctrlr_ch = nvme_qpair->ctrlr_ch;
assert(ctrlr_ch != NULL);
ch = spdk_io_channel_from_ctx(ctrlr_ch);
spdk_put_io_channel(ch);
free(io_path);
@ -717,7 +733,7 @@ nvme_ns_is_accessible(struct nvme_ns *nvme_ns)
static inline bool
nvme_io_path_is_connected(struct nvme_io_path *io_path)
{
return io_path->ctrlr_ch->qpair != NULL;
return io_path->qpair->qpair != NULL;
}
static inline bool
@ -739,7 +755,7 @@ nvme_io_path_is_failed(struct nvme_io_path *io_path)
{
struct nvme_ctrlr *nvme_ctrlr;
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(io_path->ctrlr_ch);
nvme_ctrlr = io_path->qpair->ctrlr;
if (nvme_ctrlr->destruct) {
return true;
@ -952,7 +968,7 @@ bdev_nvme_io_complete_nvme_status(struct nvme_bdev_io *bio,
nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
assert(bio->io_path != NULL);
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(bio->io_path->ctrlr_ch);
nvme_ctrlr = bio->io_path->qpair->ctrlr;
if (spdk_nvme_cpl_is_path_error(cpl) ||
spdk_nvme_cpl_is_aborted_sq_deletion(cpl) ||
@ -1056,53 +1072,50 @@ bdev_nvme_admin_passthru_complete(struct nvme_bdev_io *bio, int rc)
}
static void
_bdev_nvme_clear_io_path_cache(struct nvme_ctrlr_channel *ctrlr_ch)
_bdev_nvme_clear_io_path_cache(struct nvme_qpair *nvme_qpair)
{
struct nvme_io_path *io_path;
TAILQ_FOREACH(io_path, &ctrlr_ch->io_path_list, tailq) {
TAILQ_FOREACH(io_path, &nvme_qpair->io_path_list, tailq) {
io_path->nbdev_ch->current_io_path = NULL;
}
}
static struct nvme_ctrlr_channel *
nvme_poll_group_get_ctrlr_channel(struct nvme_poll_group *group,
struct spdk_nvme_qpair *qpair)
static struct nvme_qpair *
nvme_poll_group_get_qpair(struct nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
TAILQ_FOREACH(ctrlr_ch, &group->ctrlr_ch_list, tailq) {
if (ctrlr_ch->qpair == qpair) {
TAILQ_FOREACH(nvme_qpair, &group->qpair_list, tailq) {
if (nvme_qpair->qpair == qpair) {
break;
}
}
return ctrlr_ch;
return nvme_qpair;
}
static void
bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
struct nvme_poll_group *group = poll_group_ctx;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_qpair *nvme_qpair;
SPDK_NOTICELOG("qpair %p is disconnected, free the qpair and reset controller.\n", qpair);
/*
* Free the I/O qpair and reset the nvme_ctrlr.
*/
ctrlr_ch = nvme_poll_group_get_ctrlr_channel(group, qpair);
if (ctrlr_ch != NULL) {
if (ctrlr_ch->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch->qpair);
ctrlr_ch->qpair = NULL;
nvme_qpair = nvme_poll_group_get_qpair(group, qpair);
if (nvme_qpair != NULL) {
if (nvme_qpair->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(nvme_qpair->qpair);
nvme_qpair->qpair = NULL;
}
_bdev_nvme_clear_io_path_cache(ctrlr_ch);
_bdev_nvme_clear_io_path_cache(nvme_qpair);
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
bdev_nvme_reset(nvme_ctrlr);
bdev_nvme_reset(nvme_qpair->ctrlr);
}
}
@ -1201,14 +1214,14 @@ bdev_nvme_flush(struct nvme_bdev_io *bio, uint64_t offset, uint64_t nbytes)
}
static int
bdev_nvme_create_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
bdev_nvme_create_qpair(struct nvme_qpair *nvme_qpair)
{
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_nvme_io_qpair_opts opts;
struct spdk_nvme_qpair *qpair;
int rc;
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
nvme_ctrlr = nvme_qpair->ctrlr;
spdk_nvme_ctrlr_get_default_io_qpair_opts(nvme_ctrlr->ctrlr, &opts, sizeof(opts));
opts.delay_cmd_submit = g_opts.delay_cmd_submit;
@ -1225,9 +1238,9 @@ bdev_nvme_create_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
SPDK_DTRACE_PROBE3(bdev_nvme_create_qpair, nvme_ctrlr->nbdev_ctrlr->name,
spdk_nvme_qpair_get_id(qpair), spdk_thread_get_id(nvme_ctrlr->thread));
assert(ctrlr_ch->group != NULL);
assert(nvme_qpair->group != NULL);
rc = spdk_nvme_poll_group_add(ctrlr_ch->group->group, qpair);
rc = spdk_nvme_poll_group_add(nvme_qpair->group->group, qpair);
if (rc != 0) {
SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
goto err;
@ -1239,9 +1252,9 @@ bdev_nvme_create_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
goto err;
}
ctrlr_ch->qpair = qpair;
nvme_qpair->qpair = qpair;
_bdev_nvme_clear_io_path_cache(ctrlr_ch);
_bdev_nvme_clear_io_path_cache(nvme_qpair);
return 0;
@ -1493,12 +1506,16 @@ bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
{
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
struct nvme_qpair *nvme_qpair;
_bdev_nvme_clear_io_path_cache(ctrlr_ch);
nvme_qpair = ctrlr_ch->qpair;
assert(nvme_qpair != NULL);
if (ctrlr_ch->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch->qpair);
ctrlr_ch->qpair = NULL;
_bdev_nvme_clear_io_path_cache(nvme_qpair);
if (nvme_qpair->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(nvme_qpair->qpair);
nvme_qpair->qpair = NULL;
}
spdk_for_each_channel_continue(i, 0);
@ -1527,7 +1544,7 @@ bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i)
struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
int rc;
rc = bdev_nvme_create_qpair(ctrlr_ch);
rc = bdev_nvme_create_qpair(ctrlr_ch->qpair);
spdk_for_each_channel_continue(i, rc);
}
@ -1707,13 +1724,11 @@ bdev_nvme_reset_io_continue(void *cb_arg, bool success)
static int
_bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)
{
struct nvme_ctrlr_channel *ctrlr_ch = io_path->ctrlr_ch;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr = io_path->qpair->ctrlr;
struct nvme_ctrlr_channel *ctrlr_ch;
struct spdk_bdev_io *bdev_io;
int rc;
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
rc = bdev_nvme_reset(nvme_ctrlr);
if (rc == 0) {
assert(bio->io_path == NULL);
@ -1724,6 +1739,8 @@ _bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)
nvme_ctrlr->reset_cb_fn = bdev_nvme_reset_io_continue;
nvme_ctrlr->reset_cb_arg = bio;
} else if (rc == -EBUSY) {
ctrlr_ch = io_path->qpair->ctrlr_ch;
assert(ctrlr_ch != NULL);
/*
* Reset call is queued only if it is from the app framework. This is on purpose so that
* we don't interfere with the app framework reset strategy. i.e. we are deferring to the
@ -2043,36 +2060,50 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
static int
nvme_qpair_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ctrlr_channel *ctrlr_ch)
{
struct nvme_qpair *nvme_qpair;
struct spdk_io_channel *pg_ch;
int rc;
TAILQ_INIT(&ctrlr_ch->io_path_list);
pg_ch = spdk_get_io_channel(&g_nvme_bdev_ctrlrs);
if (!pg_ch) {
nvme_qpair = calloc(1, sizeof(*nvme_qpair));
if (!nvme_qpair) {
SPDK_ERRLOG("Failed to alloc nvme_qpair.\n");
return -1;
}
ctrlr_ch->group = spdk_io_channel_get_ctx(pg_ch);
TAILQ_INIT(&nvme_qpair->io_path_list);
nvme_qpair->ctrlr = nvme_ctrlr;
nvme_qpair->ctrlr_ch = ctrlr_ch;
pg_ch = spdk_get_io_channel(&g_nvme_bdev_ctrlrs);
if (!pg_ch) {
free(nvme_qpair);
return -1;
}
nvme_qpair->group = spdk_io_channel_get_ctx(pg_ch);
#ifdef SPDK_CONFIG_VTUNE
ctrlr_ch->group->collect_spin_stat = true;
nvme_qpair->group->collect_spin_stat = true;
#else
ctrlr_ch->group->collect_spin_stat = false;
nvme_qpair->group->collect_spin_stat = false;
#endif
rc = bdev_nvme_create_qpair(ctrlr_ch);
rc = bdev_nvme_create_qpair(nvme_qpair);
if (rc != 0) {
/* nvme ctrlr can't create IO qpair during reset. In that case ctrlr_ch->qpair
* pointer will be NULL and IO qpair will be created when reset completes.
* If the user submits IO requests during reset, they will be queued and resubmitted later */
if (!nvme_ctrlr->resetting) {
spdk_put_io_channel(pg_ch);
free(nvme_qpair);
return rc;
}
}
TAILQ_INSERT_TAIL(&ctrlr_ch->group->ctrlr_ch_list, ctrlr_ch, tailq);
TAILQ_INSERT_TAIL(&nvme_qpair->group->qpair_list, nvme_qpair, tailq);
ctrlr_ch->qpair = nvme_qpair;
return 0;
}
@ -2089,28 +2120,34 @@ bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf)
}
static void
nvme_qpair_delete(struct nvme_ctrlr_channel *ctrlr_ch)
nvme_qpair_delete(struct nvme_qpair *nvme_qpair)
{
assert(ctrlr_ch->group != NULL);
assert(nvme_qpair->group != NULL);
TAILQ_REMOVE(&ctrlr_ch->group->ctrlr_ch_list, ctrlr_ch, tailq);
TAILQ_REMOVE(&nvme_qpair->group->qpair_list, nvme_qpair, tailq);
spdk_put_io_channel(spdk_io_channel_from_ctx(ctrlr_ch->group));
spdk_put_io_channel(spdk_io_channel_from_ctx(nvme_qpair->group));
free(nvme_qpair);
}
static void
bdev_nvme_destroy_ctrlr_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf;
struct nvme_qpair *nvme_qpair;
_bdev_nvme_clear_io_path_cache(ctrlr_ch);
nvme_qpair = ctrlr_ch->qpair;
assert(nvme_qpair != NULL);
if (ctrlr_ch->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch->qpair);
ctrlr_ch->qpair = NULL;
_bdev_nvme_clear_io_path_cache(nvme_qpair);
if (nvme_qpair->qpair != NULL) {
spdk_nvme_ctrlr_free_io_qpair(nvme_qpair->qpair);
nvme_qpair->qpair = NULL;
}
nvme_qpair_delete(ctrlr_ch);
nvme_qpair_delete(nvme_qpair);
}
static void
@ -2144,7 +2181,7 @@ bdev_nvme_create_poll_group_cb(void *io_device, void *ctx_buf)
{
struct nvme_poll_group *group = ctx_buf;
TAILQ_INIT(&group->ctrlr_ch_list);
TAILQ_INIT(&group->qpair_list);
group->group = spdk_nvme_poll_group_create(group, &g_bdev_nvme_accel_fn_table);
if (group->group == NULL) {
@ -2175,7 +2212,7 @@ bdev_nvme_destroy_poll_group_cb(void *io_device, void *ctx_buf)
{
struct nvme_poll_group *group = ctx_buf;
assert(TAILQ_EMPTY(&group->ctrlr_ch_list));
assert(TAILQ_EMPTY(&group->qpair_list));
if (group->accel_channel) {
spdk_put_io_channel(group->accel_channel);
@ -2382,7 +2419,7 @@ bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
uint64_t spin_time = 0;
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
group = io_path->ctrlr_ch->group;
group = io_path->qpair->group;
if (!group || !group->collect_spin_stat) {
continue;
@ -3109,7 +3146,9 @@ bdev_nvme_clear_io_path_cache(struct spdk_io_channel_iter *i)
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
_bdev_nvme_clear_io_path_cache(ctrlr_ch);
assert(ctrlr_ch->qpair != NULL);
_bdev_nvme_clear_io_path_cache(ctrlr_ch->qpair);
spdk_for_each_channel_continue(i, 0);
}
@ -4927,7 +4966,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
}
ns = bio->io_path->nvme_ns->ns;
qpair = bio->io_path->ctrlr_ch->qpair;
qpair = bio->io_path->qpair->qpair;
zone_report_bufsize = spdk_nvme_ns_get_max_io_xfer_size(ns);
max_zones_per_buf = (zone_report_bufsize - sizeof(*bio->zone_report_buf)) /
@ -5011,7 +5050,7 @@ bdev_nvme_admin_passthru_complete_nvme_status(void *ctx)
}
nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(bio->io_path->ctrlr_ch);
nvme_ctrlr = bio->io_path->qpair->ctrlr;
if (spdk_nvme_cpl_is_path_error(cpl) ||
spdk_nvme_cpl_is_aborted_sq_deletion(cpl) ||
@ -5178,7 +5217,7 @@ bdev_nvme_no_pi_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
bio->iov_offset = 0;
rc = spdk_nvme_ns_cmd_readv_with_md(bio->io_path->nvme_ns->ns,
bio->io_path->ctrlr_ch->qpair,
bio->io_path->qpair->qpair,
lba, lba_count,
bdev_nvme_no_pi_readv_done, bio, 0,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
@ -5196,7 +5235,7 @@ bdev_nvme_readv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
struct spdk_bdev_ext_io_opts *ext_opts)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
int rc;
SPDK_DEBUGLOG(bdev_nvme, "read %" PRIu64 " blocks with offset %#" PRIx64 "\n",
@ -5243,7 +5282,7 @@ bdev_nvme_writev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
int rc;
SPDK_DEBUGLOG(bdev_nvme, "write %" PRIu64 " blocks with offset %#" PRIx64 "\n",
@ -5290,7 +5329,7 @@ bdev_nvme_zone_appendv(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
uint32_t flags)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
int rc;
SPDK_DEBUGLOG(bdev_nvme, "zone append %" PRIu64 " blocks to zone start lba %#" PRIx64 "\n",
@ -5336,7 +5375,7 @@ bdev_nvme_comparev(struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
bio->iov_offset = 0;
rc = spdk_nvme_ns_cmd_comparev_with_md(bio->io_path->nvme_ns->ns,
bio->io_path->ctrlr_ch->qpair,
bio->io_path->qpair->qpair,
lba, lba_count,
bdev_nvme_comparev_done, bio, flags,
bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
@ -5354,7 +5393,7 @@ bdev_nvme_comparev_and_writev(struct nvme_bdev_io *bio, struct iovec *cmp_iov, i
void *md, uint64_t lba_count, uint64_t lba, uint32_t flags)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
int rc;
@ -5444,7 +5483,7 @@ bdev_nvme_unmap(struct nvme_bdev_io *bio, uint64_t offset_blocks, uint64_t num_b
range->starting_lba = offset;
rc = spdk_nvme_ns_cmd_dataset_management(bio->io_path->nvme_ns->ns,
bio->io_path->ctrlr_ch->qpair,
bio->io_path->qpair->qpair,
SPDK_NVME_DSM_ATTR_DEALLOCATE,
dsm_ranges, num_ranges,
bdev_nvme_queued_done, bio);
@ -5461,7 +5500,7 @@ bdev_nvme_write_zeroes(struct nvme_bdev_io *bio, uint64_t offset_blocks, uint64_
}
return spdk_nvme_ns_cmd_write_zeroes(bio->io_path->nvme_ns->ns,
bio->io_path->ctrlr_ch->qpair,
bio->io_path->qpair->qpair,
offset_blocks, num_blocks,
bdev_nvme_queued_done, bio,
0);
@ -5472,7 +5511,7 @@ bdev_nvme_get_zone_info(struct nvme_bdev_io *bio, uint64_t zone_id, uint32_t num
struct spdk_bdev_zone_info *info)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
uint32_t zone_report_bufsize = spdk_nvme_ns_get_max_io_xfer_size(ns);
uint64_t zone_size = spdk_nvme_zns_ns_get_zone_size_sectors(ns);
uint64_t total_zones = spdk_nvme_zns_ns_get_num_zones(ns);
@ -5503,7 +5542,7 @@ bdev_nvme_zone_management(struct nvme_bdev_io *bio, uint64_t zone_id,
enum spdk_bdev_zone_action action)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
switch (action) {
case SPDK_BDEV_ZONE_CLOSE:
@ -5537,7 +5576,7 @@ bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io
/* Choose the first ctrlr which is not failed. */
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(io_path->ctrlr_ch);
nvme_ctrlr = io_path->qpair->ctrlr;
/* We should skip any unavailable nvme_ctrlr rather than checking
* if the return value of spdk_nvme_ctrlr_cmd_admin_raw() is -ENXIO.
@ -5573,7 +5612,7 @@ bdev_nvme_io_passthru(struct nvme_bdev_io *bio, struct spdk_nvme_cmd *cmd,
void *buf, size_t nbytes)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
uint32_t max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
struct spdk_nvme_ctrlr *ctrlr = spdk_nvme_ns_get_ctrlr(ns);
@ -5597,7 +5636,7 @@ bdev_nvme_io_passthru_md(struct nvme_bdev_io *bio, struct spdk_nvme_cmd *cmd,
void *buf, size_t nbytes, void *md_buf, size_t md_len)
{
struct spdk_nvme_ns *ns = bio->io_path->nvme_ns->ns;
struct spdk_nvme_qpair *qpair = bio->io_path->ctrlr_ch->qpair;
struct spdk_nvme_qpair *qpair = bio->io_path->qpair->qpair;
size_t nr_sectors = nbytes / spdk_nvme_ns_get_extended_sector_size(ns);
uint32_t max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
struct spdk_nvme_ctrlr *ctrlr = spdk_nvme_ns_get_ctrlr(ns);
@ -5650,10 +5689,10 @@ bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
* but also admin commands.
*/
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(io_path->ctrlr_ch);
nvme_ctrlr = io_path->qpair->ctrlr;
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ctrlr->ctrlr,
io_path->ctrlr_ch->qpair,
io_path->qpair->qpair,
bio_to_abort,
bdev_nvme_abort_done, bio);
if (rc == -ENOENT) {

module/bdev/nvme/bdev_nvme.h

@ -170,23 +170,27 @@ struct nvme_bdev {
TAILQ_ENTRY(nvme_bdev) tailq;
};
struct nvme_ctrlr_channel {
struct nvme_qpair {
struct nvme_ctrlr *ctrlr;
struct spdk_nvme_qpair *qpair;
struct nvme_poll_group *group;
TAILQ_HEAD(, spdk_bdev_io) pending_resets;
TAILQ_ENTRY(nvme_ctrlr_channel) tailq;
struct nvme_ctrlr_channel *ctrlr_ch;
/* The following is used to update io_path cache of nvme_bdev_channels. */
TAILQ_HEAD(, nvme_io_path) io_path_list;
TAILQ_ENTRY(nvme_qpair) tailq;
};
#define nvme_ctrlr_channel_get_ctrlr(ctrlr_ch) \
(struct nvme_ctrlr *)spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(ctrlr_ch))
struct nvme_ctrlr_channel {
struct nvme_ctrlr *ctrlr;
struct nvme_qpair *qpair;
TAILQ_HEAD(, spdk_bdev_io) pending_resets;
};
struct nvme_io_path {
struct nvme_ns *nvme_ns;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *qpair;
STAILQ_ENTRY(nvme_io_path) stailq;
/* The following are used to update io_path cache of the nvme_bdev_channel. */
@ -209,7 +213,7 @@ struct nvme_poll_group {
uint64_t spin_ticks;
uint64_t start_ticks;
uint64_t end_ticks;
TAILQ_HEAD(, nvme_ctrlr_channel) ctrlr_ch_list;
TAILQ_HEAD(, nvme_qpair) qpair_list;
};
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);

module/bdev/nvme/bdev_nvme_rpc.c

@ -1885,9 +1885,8 @@ rpc_add_error_injection_per_channel(struct spdk_io_channel_iter *i)
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
struct rpc_add_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
struct nvme_ctrlr *nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair;
struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair;
struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr;
int rc = 0;
if (qpair != NULL) {
@ -2003,11 +2002,10 @@ rpc_remove_error_injection_per_channel(struct spdk_io_channel_iter *i)
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
struct rpc_remove_error_injection_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
struct nvme_ctrlr *nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair;
struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
struct spdk_nvme_qpair *qpair = ctrlr_ch->qpair->qpair;
struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->qpair->ctrlr->ctrlr;
if (ctrlr_ch->qpair != NULL) {
if (qpair != NULL) {
spdk_nvme_qpair_remove_cmd_error_injection(ctrlr, qpair, ctx->rpc.opc);
}

test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c

@ -1370,24 +1370,24 @@ test_reset_ctrlr(void)
CU_ASSERT(ctrlr_ch2->qpair != NULL);
poll_thread_times(0, 3);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
CU_ASSERT(ctrlr.is_failed == true);
poll_thread_times(0, 1);
CU_ASSERT(ctrlr.is_failed == false);
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(curr_trid->is_failed == true);
@ -1777,7 +1777,7 @@ test_pending_reset(void)
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
@ -1791,7 +1791,7 @@ test_pending_reset(void)
nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
ctrlr_ch2 = io_path2->ctrlr_ch;
ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
@ -2101,7 +2101,7 @@ ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io
io_path = bdev_nvme_find_io_path(nbdev_ch);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
qpair = io_path->ctrlr_ch->qpair;
qpair = io_path->qpair->qpair;
SPDK_CU_ASSERT_FATAL(qpair != NULL);
bdev_io->type = io_type;
@ -2129,7 +2129,7 @@ ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
io_path = bdev_nvme_find_io_path(nbdev_ch);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
qpair = io_path->ctrlr_ch->qpair;
qpair = io_path->qpair->qpair;
SPDK_CU_ASSERT_FATAL(qpair != NULL);
bdev_io->type = io_type;
@ -2153,7 +2153,7 @@ ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *b
io_path = bdev_nvme_find_io_path(nbdev_ch);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
qpair = io_path->ctrlr_ch->qpair;
qpair = io_path->qpair->qpair;
SPDK_CU_ASSERT_FATAL(qpair != NULL);
/* Only compare and write now. */
@ -2468,7 +2468,7 @@ test_abort(void)
struct spdk_io_channel *ch1, *ch2;
struct nvme_bdev_channel *nbdev_ch1;
struct nvme_io_path *io_path1;
struct nvme_ctrlr_channel *ctrlr_ch1;
struct nvme_qpair *nvme_qpair1;
int rc;
/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
@ -2517,8 +2517,8 @@ test_abort(void)
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
nvme_qpair1 = io_path1->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
set_thread(1);
@ -2572,7 +2572,7 @@ test_abort(void)
bdev_nvme_submit_request(ch1, write_io);
CU_ASSERT(write_io->internal.in_submit_request == true);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
abort_io->u.abort.bio_to_abort = write_io;
@ -2588,7 +2588,7 @@ test_abort(void)
CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
CU_ASSERT(write_io->internal.in_submit_request == false);
CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
/* Aborting the admin request should succeed. */
admin_io->internal.in_submit_request = true;
@ -2619,11 +2619,11 @@ test_abort(void)
* the corresponding nvme_ctrlr. I/O should be queued if it is submitted
* while resetting the nvme_ctrlr.
*/
ctrlr_ch1->qpair->is_failed = true;
nvme_qpair1->qpair->is_failed = true;
poll_thread_times(0, 3);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair == NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
write_io->internal.in_submit_request = true;
@ -2695,10 +2695,10 @@ test_get_io_qpair(void)
ch = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch != NULL);
ctrlr_ch = spdk_io_channel_get_ctx(ch);
CU_ASSERT(ctrlr_ch->qpair != NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
qpair = bdev_nvme_get_io_qpair(ch);
CU_ASSERT(qpair == ctrlr_ch->qpair);
CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
spdk_put_io_channel(ch);
@ -2951,7 +2951,7 @@ test_reconnect_qpair(void)
struct spdk_io_channel *ch1, *ch2;
struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
struct nvme_io_path *io_path1, *io_path2;
struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
@ -2984,8 +2984,8 @@ test_reconnect_qpair(void)
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
nvme_qpair1 = io_path1->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
set_thread(1);
@ -2995,24 +2995,24 @@ test_reconnect_qpair(void)
nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
ctrlr_ch2 = io_path2->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
nvme_qpair2 = io_path2->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
/* If a qpair is disconnected, it is freed and then reconnected via
* resetting the corresponding nvme_ctrlr.
*/
ctrlr_ch2->qpair->is_failed = true;
nvme_qpair2->qpair->is_failed = true;
ctrlr->is_failed = true;
poll_thread_times(1, 2);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair != NULL);
CU_ASSERT(nvme_qpair2->qpair == NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
poll_thread_times(0, 2);
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair == NULL);
CU_ASSERT(nvme_qpair2->qpair == NULL);
CU_ASSERT(ctrlr->is_failed == true);
poll_thread_times(0, 1);
@ -3020,8 +3020,8 @@ test_reconnect_qpair(void)
poll_thread_times(0, 1);
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
CU_ASSERT(nvme_qpair1->qpair != NULL);
CU_ASSERT(nvme_qpair2->qpair != NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
poll_thread_times(0, 2);
@ -3034,19 +3034,19 @@ test_reconnect_qpair(void)
/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
* fails, the qpair is just freed.
*/
ctrlr_ch2->qpair->is_failed = true;
nvme_qpair2->qpair->is_failed = true;
ctrlr->is_failed = true;
ctrlr->fail_reset = true;
poll_thread_times(1, 2);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair != NULL);
CU_ASSERT(nvme_qpair2->qpair == NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
poll_thread_times(0, 2);
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair == NULL);
CU_ASSERT(nvme_qpair2->qpair == NULL);
CU_ASSERT(ctrlr->is_failed == true);
poll_thread_times(0, 2);
@ -3054,8 +3054,8 @@ test_reconnect_qpair(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr->is_failed == true);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(nvme_qpair1->qpair == NULL);
CU_ASSERT(nvme_qpair2->qpair == NULL);
poll_threads();
@ -3713,11 +3713,9 @@ ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_io_path *io_path;
struct nvme_ctrlr *_nvme_ctrlr;
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
_nvme_ctrlr = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(io_path->ctrlr_ch));
if (_nvme_ctrlr == nvme_ctrlr) {
if (io_path->qpair->ctrlr == nvme_ctrlr) {
return io_path;
}
}
@ -3841,12 +3839,12 @@ test_reset_bdev_ctrlr(void)
CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
poll_thread_times(0, 2);
CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path11->qpair->qpair == NULL);
CU_ASSERT(io_path21->qpair->qpair != NULL);
poll_thread_times(1, 1);
CU_ASSERT(io_path11->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path11->qpair->qpair == NULL);
CU_ASSERT(io_path21->qpair->qpair == NULL);
CU_ASSERT(ctrlr1->is_failed == true);
poll_thread_times(0, 1);
@ -3855,12 +3853,12 @@ test_reset_bdev_ctrlr(void)
CU_ASSERT(curr_path1->is_failed == true);
poll_thread_times(0, 1);
CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path21->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path11->qpair->qpair != NULL);
CU_ASSERT(io_path21->qpair->qpair == NULL);
poll_thread_times(1, 1);
CU_ASSERT(io_path11->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path21->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path11->qpair->qpair != NULL);
CU_ASSERT(io_path21->qpair->qpair != NULL);
poll_thread_times(0, 2);
CU_ASSERT(nvme_ctrlr1->resetting == true);
@ -3873,12 +3871,12 @@ test_reset_bdev_ctrlr(void)
CU_ASSERT(nvme_ctrlr2->resetting == true);
poll_thread_times(0, 2);
CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path12->qpair->qpair == NULL);
CU_ASSERT(io_path22->qpair->qpair != NULL);
poll_thread_times(1, 1);
CU_ASSERT(io_path12->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path12->qpair->qpair == NULL);
CU_ASSERT(io_path22->qpair->qpair == NULL);
CU_ASSERT(ctrlr2->is_failed == true);
poll_thread_times(0, 2);
@ -3887,12 +3885,12 @@ test_reset_bdev_ctrlr(void)
CU_ASSERT(curr_path2->is_failed == true);
poll_thread_times(0, 1);
CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path22->ctrlr_ch->qpair == NULL);
CU_ASSERT(io_path12->qpair->qpair != NULL);
CU_ASSERT(io_path22->qpair->qpair == NULL);
poll_thread_times(1, 2);
CU_ASSERT(io_path12->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path22->ctrlr_ch->qpair != NULL);
CU_ASSERT(io_path12->qpair->qpair != NULL);
CU_ASSERT(io_path22->qpair->qpair != NULL);
poll_thread_times(0, 2);
CU_ASSERT(nvme_ctrlr2->resetting == true);
@ -3931,7 +3929,7 @@ test_reset_bdev_ctrlr(void)
CU_ASSERT(nvme_ctrlr1->resetting == true);
CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
CU_ASSERT(TAILQ_FIRST(&io_path21->ctrlr_ch->pending_resets) == second_bdev_io);
CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);
poll_threads();
@ -3973,16 +3971,16 @@ test_find_io_path(void)
struct nvme_bdev_channel nbdev_ch = {
.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
};
struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
struct nvme_qpair nvme_qpair1 = {}, nvme_qpair2 = {};
struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
struct nvme_io_path io_path1 = { .ctrlr_ch = &ctrlr_ch1, .nvme_ns = &nvme_ns1, };
struct nvme_io_path io_path2 = { .ctrlr_ch = &ctrlr_ch2, .nvme_ns = &nvme_ns2, };
struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
/* Test if io_path whose ANA state is not accessible is excluded. */
ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_qpair1.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
@ -4004,7 +4002,7 @@ test_find_io_path(void)
/* Test if io_path whose qpair is resetting is excluded. */
ctrlr_ch1.qpair = NULL;
nvme_qpair1.qpair = NULL;
CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);
STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
@ -4013,9 +4011,9 @@ test_find_io_path(void)
* is prioritized.
*/
ctrlr_ch1.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_qpair1.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
ctrlr_ch2.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_qpair2.qpair = (struct spdk_nvme_qpair *)0x1;
nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
@ -4043,7 +4041,7 @@ test_retry_io_if_ana_state_is_updating(void)
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
@ -4090,9 +4088,9 @@ test_retry_io_if_ana_state_is_updating(void)
io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
ctrlr_ch = io_path->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
nvme_qpair = io_path->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
@ -4114,7 +4112,7 @@ test_retry_io_if_ana_state_is_updating(void)
bdev_nvme_submit_request(ch, bdev_io1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io1->internal.in_submit_request == true);
CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
@ -4125,13 +4123,13 @@ test_retry_io_if_ana_state_is_updating(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io1->internal.in_submit_request == true);
CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io1->internal.in_submit_request == false);
CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -4167,7 +4165,7 @@ test_retry_io_for_io_path_error(void)
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path1, *io_path2;
struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
struct ut_nvme_req *req;
struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
int rc;
@ -4224,9 +4222,9 @@ test_retry_io_for_io_path_error(void)
io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch1->qpair != NULL);
nvme_qpair1 = io_path1->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);
bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
@ -4235,10 +4233,10 @@ test_retry_io_for_io_path_error(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
@ -4247,7 +4245,7 @@ test_retry_io_for_io_path_error(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
@ -4256,10 +4254,10 @@ test_retry_io_for_io_path_error(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
@ -4267,13 +4265,13 @@ test_retry_io_for_io_path_error(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -4303,9 +4301,9 @@ test_retry_io_for_io_path_error(void)
io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
ctrlr_ch2 = io_path2->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch2->qpair != NULL);
nvme_qpair2 = io_path2->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);
/* I/O is submitted to io_path1, but qpair of io_path1 was disconnected
* and deleted. Hence the I/O was aborted. But io_path2 is available.
@ -4315,11 +4313,11 @@ test_retry_io_for_io_path_error(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch1->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
@ -4327,17 +4325,17 @@ test_retry_io_for_io_path_error(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch1->qpair);
ctrlr_ch1->qpair = NULL;
spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
nvme_qpair1->qpair = NULL;
poll_threads();
CU_ASSERT(ctrlr_ch2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -4375,7 +4373,7 @@ test_retry_io_count(void)
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
struct ut_nvme_req *req;
int rc;
@ -4422,9 +4420,9 @@ test_retry_io_count(void)
io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
ctrlr_ch = io_path->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
nvme_qpair = io_path->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;
@ -4435,10 +4433,10 @@ test_retry_io_count(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
@ -4446,7 +4444,7 @@ test_retry_io_count(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
@ -4459,10 +4457,10 @@ test_retry_io_count(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
@ -4471,7 +4469,7 @@ test_retry_io_count(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);
@ -4482,10 +4480,10 @@ test_retry_io_count(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
@ -4494,13 +4492,13 @@ test_retry_io_count(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -4513,10 +4511,10 @@ test_retry_io_count(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
@ -4525,13 +4523,13 @@ test_retry_io_count(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -4653,7 +4651,7 @@ test_retry_io_for_ana_error(void)
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
struct ut_nvme_req *req;
uint64_t now;
int rc;
@ -4706,9 +4704,9 @@ test_retry_io_for_ana_error(void)
io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
ctrlr_ch = io_path->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
nvme_qpair = io_path->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
now = spdk_get_ticks();
@ -4721,10 +4719,10 @@ test_retry_io_for_ana_error(void)
bdev_nvme_submit_request(ch, bdev_io);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
req = ut_get_outstanding_nvme_request(ctrlr_ch->qpair, bio);
req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
SPDK_CU_ASSERT_FATAL(req != NULL);
nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
@ -4733,7 +4731,7 @@ test_retry_io_for_ana_error(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
/* I/O should be retried immediately. */
@ -4744,7 +4742,7 @@ test_retry_io_for_ana_error(void)
poll_threads();
/* Namespace is inaccessible, and hence I/O should be queued again. */
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
/* I/O should be retried after a second if no I/O path was found but
@ -4764,7 +4762,7 @@ test_retry_io_for_ana_error(void)
spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io->internal.in_submit_request == false);
CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -5128,7 +5126,7 @@ test_retry_io_if_ctrlr_is_resetting(void)
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr_channel *ctrlr_ch;
struct nvme_qpair *nvme_qpair;
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
@ -5178,9 +5176,9 @@ test_retry_io_if_ctrlr_is_resetting(void)
io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
ctrlr_ch = io_path->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
nvme_qpair = io_path->qpair;
SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;
@ -5199,12 +5197,12 @@ test_retry_io_if_ctrlr_is_resetting(void)
* the corresponding nvme_ctrlr. I/O should be queued if it is submitted
* while resetting the nvme_ctrlr.
*/
ctrlr_ch->qpair->is_failed = true;
nvme_qpair->qpair->is_failed = true;
ctrlr->is_failed = true;
poll_thread_times(0, 5);
CU_ASSERT(ctrlr_ch->qpair == NULL);
CU_ASSERT(nvme_qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(ctrlr->is_failed == false);
@ -5225,21 +5223,21 @@ test_retry_io_if_ctrlr_is_resetting(void)
poll_threads();
CU_ASSERT(ctrlr_ch->qpair != NULL);
CU_ASSERT(nvme_qpair->qpair != NULL);
CU_ASSERT(nvme_ctrlr->resetting == false);
spdk_delay_us(999999);
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io1->internal.in_submit_request == true);
CU_ASSERT(bdev_io2->internal.in_submit_request == true);
CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io1->internal.in_submit_request == false);
CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
CU_ASSERT(bdev_io2->internal.in_submit_request == true);
@ -5249,13 +5247,13 @@ test_retry_io_if_ctrlr_is_resetting(void)
poll_thread_times(0, 1);
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 1);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
CU_ASSERT(bdev_io2->internal.in_submit_request == true);
CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));
poll_threads();
CU_ASSERT(ctrlr_ch->qpair->num_outstanding_reqs == 0);
CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
CU_ASSERT(bdev_io2->internal.in_submit_request == false);
CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -5451,8 +5449,8 @@ test_reconnect_ctrlr(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr.is_failed == false);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
@ -5468,8 +5466,8 @@ test_reconnect_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
/* The reset should fail and a reconnect timer should be registered. */
@ -5485,8 +5483,8 @@ test_reconnect_ctrlr(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr.is_failed == false);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
@ -5501,8 +5499,8 @@ test_reconnect_ctrlr(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr.is_failed == false);
CU_ASSERT(ctrlr_ch1->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair == NULL);
CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
@ -5595,7 +5593,7 @@ test_retry_failover_ctrlr(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr.is_failed == false);
CU_ASSERT(ctrlr_ch->qpair == NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
@ -5635,7 +5633,7 @@ test_retry_failover_ctrlr(void)
CU_ASSERT(path_id3->is_failed == false);
CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr_ch->qpair != NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
spdk_put_io_channel(ch);
@ -5723,9 +5721,9 @@ test_fail_path(void)
io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(io_path != NULL);
ctrlr_ch = io_path->ctrlr_ch;
ctrlr_ch = io_path->qpair->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair != NULL);
SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);
bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
ut_bdev_io_set_buf(bdev_io);
@ -5744,7 +5742,7 @@ test_fail_path(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr->is_failed == false);
CU_ASSERT(ctrlr_ch->qpair == NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
@ -5768,7 +5766,7 @@ test_fail_path(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr->is_failed == false);
CU_ASSERT(ctrlr_ch->qpair == NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);
@ -5779,7 +5777,7 @@ test_fail_path(void)
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(ctrlr->is_failed == false);
CU_ASSERT(ctrlr_ch->qpair == NULL);
CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);