nvme: call correct remove_cb when device is removed

When a device is removed, we should use the remove_cb
that was specified when the device was originally probed
and attached, if one was set.

Also add a new spdk_nvme_ctrlr_set_remove_cb API.  This
can be used when a different remove_ctx is desired than
the one specified for the probe call.  It also enables
setting a remove_cb when using the connect APIs, which
currently have no way to provide a remove_cb.
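
For example, a connect-based application could register a removal
callback roughly as sketched below; the app_* names are illustrative
only and not part of this patch:

#include "spdk/nvme.h"

/* Matches the spdk_nvme_remove_cb signature; invoked on hot-remove. */
static void
app_remove_cb(void *remove_ctx, struct spdk_nvme_ctrlr *ctrlr)
{
	/* Stop submitting I/O and schedule a detach of ctrlr here. */
	(void)remove_ctx;
	(void)ctrlr;
}

/*
 * After spdk_nvme_connect()/spdk_nvme_connect_async() attaches a
 * controller, register the removal callback directly on it.  Note
 * that this only has an effect in the primary process.
 */
static void
app_register_remove_cb(struct spdk_nvme_ctrlr *ctrlr, void *app_ctx)
{
	spdk_nvme_ctrlr_set_remove_cb(ctrlr, app_remove_cb, app_ctx);
}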

This also requires fixing the bdev nvme module, which
depended on the previous, errant behavior.

Fixes issue #1715.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Id848b39040099ff7a21fe57ea6b194a8c25ae015

Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5510
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: <dongx.yi@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Jim Harris 2020-12-07 08:42:00 -07:00 committed by Tomasz Zawadzki
parent fd7c1ff2d8
commit 17afd268df
5 changed files with 44 additions and 18 deletions

include/spdk/nvme.h

@@ -867,6 +867,22 @@ int spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *detach_ctx);
*/
int spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid);
+/**
+ * Set the remove callback and context to be invoked if the controller is removed.
+ *
+ * This will override any remove_cb and/or ctx specified when the controller was
+ * probed.
+ *
+ * This function may only be called from the primary process. This function has
+ * no effect if called from a secondary process.
+ *
+ * \param ctrlr Opaque handle to an NVMe controller.
+ * \param remove_cb remove callback
+ * \param remove_ctx remove callback context
+ */
+void spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
+spdk_nvme_remove_cb remove_cb, void *remove_ctx);
/**
* Perform a full hardware reset of the NVMe controller.
*

lib/nvme/nvme_ctrlr.c

@@ -1454,6 +1454,20 @@ out:
return rc;
}
+void
+spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
+spdk_nvme_remove_cb remove_cb, void *remove_ctx)
+{
+if (!spdk_process_is_primary()) {
+return;
+}
+nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+ctrlr->remove_cb = remove_cb;
+ctrlr->cb_ctx = remove_ctx;
+nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+}
static void
nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
{

lib/nvme/nvme_pcie.c

@@ -306,7 +306,7 @@ _nvme_pcie_hotplug_monitor(struct spdk_nvme_probe_ctx *probe_ctx)
/* get the user app to clean up and stop I/O */
if (ctrlr->remove_cb) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
-ctrlr->remove_cb(probe_ctx->cb_ctx, ctrlr);
+ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
}
}
@@ -335,7 +335,7 @@ _nvme_pcie_hotplug_monitor(struct spdk_nvme_probe_ctx *probe_ctx)
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
if (ctrlr->remove_cb) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
-ctrlr->remove_cb(probe_ctx->cb_ctx, ctrlr);
+ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
}
}

lib/nvme/spdk_nvme.map

@@ -87,6 +87,7 @@
spdk_nvme_ctrlr_get_transport_id;
spdk_nvme_ctrlr_alloc_qid;
spdk_nvme_ctrlr_free_qid;
+spdk_nvme_ctrlr_set_remove_cb;
spdk_nvme_poll_group_create;
spdk_nvme_poll_group_add;

module/bdev/nvme/bdev_nvme.c

@@ -170,6 +170,7 @@ static int bdev_nvme_abort(struct nvme_io_channel *nvme_ch,
struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
static int bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio);
static int bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove);
+static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
typedef void (*populate_namespace_fn)(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
struct nvme_bdev_ns *nvme_ns, struct nvme_async_probe_ctx *ctx);
@@ -1534,6 +1535,7 @@ nvme_bdev_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
}
spdk_nvme_ctrlr_register_aer_callback(ctrlr, aer_cb, nvme_bdev_ctrlr);
+spdk_nvme_ctrlr_set_remove_cb(ctrlr, remove_cb, nvme_bdev_ctrlr);
if (spdk_nvme_ctrlr_get_flags(nvme_bdev_ctrlr->ctrlr) &
SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED) {
@@ -1625,11 +1627,10 @@ _nvme_bdev_ctrlr_destruct(void *ctx)
static void
remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr)
{
-struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
+struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = cb_ctx;
pthread_mutex_lock(&g_bdev_nvme_mutex);
-TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
-if (nvme_bdev_ctrlr->ctrlr == ctrlr) {
+assert(nvme_bdev_ctrlr->ctrlr == ctrlr);
/* The controller's destruction was already started */
if (nvme_bdev_ctrlr->destruct) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
@@ -1637,12 +1638,7 @@ remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr)
}
nvme_bdev_ctrlr->destruct = true;
pthread_mutex_unlock(&g_bdev_nvme_mutex);
_nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
-return;
-}
-}
-pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
static int
@@ -1656,8 +1652,7 @@ bdev_nvme_hotplug(void *arg)
spdk_nvme_trid_populate_transport(&trid_pcie, SPDK_NVME_TRANSPORT_PCIE);
g_hotplug_probe_ctx = spdk_nvme_probe_async(&trid_pcie, NULL,
-hotplug_probe_cb,
-attach_cb, remove_cb);
+hotplug_probe_cb, attach_cb, NULL);
if (!g_hotplug_probe_ctx) {
return SPDK_POLLER_BUSY;
}