bdev/nvme: Remove destruct_poller and process deferred destruct request after completing reset
destruct_poller had been used to destruct a ctrlr after its reset completed. Remove destruct_poller and instead have the reset path destruct the ctrlr itself once the reset completes.

Calling spdk_io_device_unregister() while spdk_for_each_channel() is still in progress may cause the iteration to fail. Hence call nvme_bdev_ctrlr_do_destruct() as the completion function of spdk_for_each_channel().

The first idea was to always run destruct_poller in nvme_bdev_ctrlr_destruct(), but this approach is simpler and more intuitive.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I43a400bdb67ab015d707fb9679693bd3d5bfb070
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5607
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: <dongx.yi@intel.com>
parent d2b0b15406
commit 2caff467ea
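The mechanism the message describes builds on SPDK's channel-iteration API: spdk_for_each_channel() visits every channel of an io_device and only then invokes its completion callback, which is a safe point to hop back to the device's owning thread and unregister it. The sketch below is a minimal, hypothetical illustration of that pattern, not the patched bdev_nvme code; the my_dev type, the my_dev_* function names, and the field layout are invented for illustration.

#include <stdlib.h>

#include "spdk/thread.h"

/* Hypothetical io_device; only the owning thread pointer matters for the pattern. */
struct my_dev {
        struct spdk_thread *thread;
};

static void
my_dev_unregister_cb(void *io_device)
{
        /* Called once the io_device is fully unregistered. */
        free(io_device);
}

static void
my_dev_do_destruct(void *ctx)
{
        struct my_dev *dev = ctx;

        /* Runs on dev->thread with no spdk_for_each_channel() in flight,
         * so unregistering the io_device here cannot break an iteration. */
        spdk_io_device_unregister(dev, my_dev_unregister_cb);
}

static void
my_dev_cleanup_channel(struct spdk_io_channel_iter *i)
{
        /* Per-channel cleanup (e.g. completing pending resets) would go here. */
        spdk_for_each_channel_continue(i, 0);
}

static void
my_dev_cleanup_done(struct spdk_io_channel_iter *i, int status)
{
        struct my_dev *dev = spdk_io_channel_iter_get_io_device(i);

        /* Completion callback: defer the actual destruct to the owning thread. */
        spdk_thread_send_msg(dev->thread, my_dev_do_destruct, dev);
}

static void
my_dev_start_deferred_destruct(struct my_dev *dev)
{
        /* The completion callback runs only after every channel has been visited. */
        spdk_for_each_channel(dev, my_dev_cleanup_channel, NULL, my_dev_cleanup_done);
}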
@@ -351,6 +351,15 @@ err:
         return rc;
 }
 
+static void
+_bdev_nvme_reset_destruct_ctrlr(struct spdk_io_channel_iter *i, int status)
+{
+        struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = spdk_io_channel_iter_get_io_device(i);
+
+        spdk_thread_send_msg(nvme_bdev_ctrlr->thread, nvme_bdev_ctrlr_do_destruct,
+                             nvme_bdev_ctrlr);
+}
+
 static void
 _bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
 {
@@ -380,6 +389,7 @@ _bdev_nvme_reset_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int rc)
         /* If it's zero, we succeeded, otherwise, the reset failed. */
         void *cb_arg = NULL;
         struct nvme_bdev_ctrlr_trid *curr_trid;
+        bool do_destruct = false;
 
         if (rc) {
                 cb_arg = (void *)0x1;
@@ -398,11 +408,17 @@ _bdev_nvme_reset_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int rc)
 
         curr_trid->is_failed = cb_arg != NULL ? true : false;
 
+        if (nvme_bdev_ctrlr->ref == 0 && nvme_bdev_ctrlr->destruct) {
+                /* Destruct ctrlr after clearing pending resets. */
+                do_destruct = true;
+        }
+
         pthread_mutex_unlock(&g_bdev_nvme_mutex);
         /* Make sure we clear any pending resets before returning. */
         spdk_for_each_channel(nvme_bdev_ctrlr,
                               _bdev_nvme_complete_pending_resets,
-                              cb_arg, NULL);
+                              cb_arg,
+                              do_destruct ? _bdev_nvme_reset_destruct_ctrlr : NULL);
 }
 
 static void
@@ -148,22 +148,10 @@ nvme_bdev_unregister_cb(void *io_device)
         pthread_mutex_unlock(&g_bdev_nvme_mutex);
 }
 
-int
-nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
+void
+nvme_bdev_ctrlr_do_destruct(void *ctx)
 {
-        assert(nvme_bdev_ctrlr->destruct);
-        pthread_mutex_lock(&g_bdev_nvme_mutex);
-
-        spdk_poller_unregister(&nvme_bdev_ctrlr->destruct_poller);
-
-        if (nvme_bdev_ctrlr->resetting) {
-                nvme_bdev_ctrlr->destruct_poller =
-                        SPDK_POLLER_REGISTER((spdk_poller_fn)nvme_bdev_ctrlr_destruct,
-                                             nvme_bdev_ctrlr, 1000);
-                pthread_mutex_unlock(&g_bdev_nvme_mutex);
-                return SPDK_POLLER_BUSY;
-        }
-        pthread_mutex_unlock(&g_bdev_nvme_mutex);
+        struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx;
 
         if (nvme_bdev_ctrlr->opal_dev) {
                 spdk_opal_dev_destruct(nvme_bdev_ctrlr->opal_dev);
@@ -175,7 +163,21 @@ nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
         }
 
         spdk_io_device_unregister(nvme_bdev_ctrlr, nvme_bdev_unregister_cb);
-        return SPDK_POLLER_BUSY;
+}
+
+void
+nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
+{
+        assert(nvme_bdev_ctrlr->destruct);
+
+        pthread_mutex_lock(&g_bdev_nvme_mutex);
+        if (nvme_bdev_ctrlr->resetting) {
+                pthread_mutex_unlock(&g_bdev_nvme_mutex);
+                return;
+        }
+        pthread_mutex_unlock(&g_bdev_nvme_mutex);
+
+        nvme_bdev_ctrlr_do_destruct(nvme_bdev_ctrlr);
 }
 
 void
@@ -101,7 +101,6 @@ struct nvme_bdev_ctrlr {
         struct spdk_opal_dev *opal_dev;
 
         struct spdk_poller *adminq_timer_poller;
-        struct spdk_poller *destruct_poller;
         struct spdk_thread *thread;
 
         struct ocssd_bdev_ctrlr *ocssd_ctrlr;
@@ -165,7 +164,8 @@ struct nvme_bdev_ctrlr *nvme_bdev_next_ctrlr(struct nvme_bdev_ctrlr *prev);
 void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
                               struct spdk_json_write_ctx *w);
 
-int nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
+void nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
+void nvme_bdev_ctrlr_do_destruct(void *ctx);
 void nvme_bdev_attach_bdev_to_ns(struct nvme_bdev_ns *nvme_ns, struct nvme_bdev *nvme_disk);
 void nvme_bdev_detach_bdev_from_ns(struct nvme_bdev *nvme_disk);
 
@@ -545,6 +545,7 @@ delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
         CU_ASSERT(nvme_bdev_ctrlr->ref == 1);
         nvme_bdev_ctrlr->ref--;
         nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
+        spdk_delay_us(1000);
 
         while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
 
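Regarding the unit-test change: work handed off with spdk_thread_send_msg() only runs when the target thread is polled, which is why the test keeps calling spdk_thread_poll() after nvme_bdev_ctrlr_destruct(). Below is a minimal, hypothetical standalone illustration of that behavior; it is not the actual test harness, and the setup and teardown are simplified.

#include <assert.h>
#include <stdbool.h>

#include "spdk/thread.h"

static void
set_flag(void *ctx)
{
        *(bool *)ctx = true;
}

int
main(void)
{
        struct spdk_thread *thread;
        bool done = false;

        spdk_thread_lib_init(NULL, 0);
        thread = spdk_thread_create("ut_thread", NULL);
        spdk_set_thread(thread);

        /* Messages are queued, not run inline, even when sent to the current thread. */
        spdk_thread_send_msg(thread, set_flag, &done);
        assert(!done);

        /* They execute only while the thread is polled -- the same reason the
         * unit test loops on spdk_thread_poll() after nvme_bdev_ctrlr_destruct(). */
        while (spdk_thread_poll(thread, 0, 0) > 0) {
        }
        assert(done);

        /* Thread teardown (exit/destroy, lib fini) omitted for brevity. */
        return 0;
}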