bdev/nvme: asynchronous namespace depopulation

This patch adds the ability for a namespace to be depopulated
asynchronously.  Currently, both regular NVMe namespaces and OCSSD
namespaces are depopulated synchronously; that will change in upcoming
patches.

nvme_bdev_ctrlr.ref now tracks not only the number of bdevs created on
the controller, but also the number of populated namespaces.
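
For illustration, a minimal sketch of the asynchronous flow this patch
enables, assuming teardown goes through spdk_bdev_unregister(); the
helper names and the ns->bdev field below are hypothetical, not part of
this patch:

/* Illustrative only: an asynchronous depopulation path.  Instead of
 * dropping the controller reference inline, the namespace defers it to
 * the unregister completion callback, so the controller stays alive
 * until teardown actually finishes. */
static void
depopulate_namespace_unregister_cb(void *cb_arg, int rc)
{
	struct nvme_bdev_ns *ns = cb_arg;

	ns->populated = false;
	nvme_ctrlr_depopulate_namespace_done(ns->ctrlr);
}

static void
depopulate_namespace_async(struct nvme_bdev_ns *ns)
{
	/* Completion (and the reference drop) happens in the callback. */
	spdk_bdev_unregister(ns->bdev, depopulate_namespace_unregister_cb, ns);
}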

Change-Id: I7b112d9b0d41739f3dc7d427e9da340843128c54
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1197
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
 4 files changed, 31 insertions(+), 8 deletions(-)


@@ -1073,6 +1073,21 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
 	}
 }
 
+void
+nvme_ctrlr_depopulate_namespace_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
+{
+	pthread_mutex_lock(&g_bdev_nvme_mutex);
+	nvme_bdev_ctrlr->ref--;
+
+	if (nvme_bdev_ctrlr->ref == 0 && nvme_bdev_ctrlr->destruct) {
+		pthread_mutex_unlock(&g_bdev_nvme_mutex);
+		nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
+		return;
+	}
+
+	pthread_mutex_unlock(&g_bdev_nvme_mutex);
+}
+
 static void
 nvme_ctrlr_depopulate_standard_namespace(struct nvme_bdev_ns *ns)
 {
@@ -1083,6 +1098,8 @@ nvme_ctrlr_depopulate_standard_namespace(struct nvme_bdev_ns *ns)
 	}
 
 	ns->populated = false;
+
+	nvme_ctrlr_depopulate_namespace_done(ns->ctrlr);
 }
 
 static void nvme_ctrlr_populate_namespace(struct nvme_bdev_ctrlr *ctrlr, struct nvme_bdev_ns *ns,
@@ -1102,6 +1119,9 @@ nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
 {
 	if (rc == 0) {
 		ns->populated = true;
+		pthread_mutex_lock(&g_bdev_nvme_mutex);
+		ns->ctrlr->ref++;
+		pthread_mutex_unlock(&g_bdev_nvme_mutex);
 	} else {
 		memset(ns, 0, sizeof(*ns));
 	}
@@ -1901,12 +1921,6 @@ bdev_nvme_library_fini(void)
 
 	pthread_mutex_lock(&g_bdev_nvme_mutex);
 	TAILQ_FOREACH_SAFE(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq, tmp) {
-		if (nvme_bdev_ctrlr->ref > 0) {
-			SPDK_ERRLOG("Controller %s is still referenced, can't destroy it\n",
-				    nvme_bdev_ctrlr->name);
-			continue;
-		}
-
 		if (nvme_bdev_ctrlr->destruct) {
 			/* This controller's destruction was already started
 			 * before the application started shutting down
@@ -1914,7 +1928,6 @@ bdev_nvme_library_fini(void)
 			continue;
 		}
 
-		nvme_bdev_ctrlr->destruct = true;
 		pthread_mutex_unlock(&g_bdev_nvme_mutex);
 
 		for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) {
@@ -1927,9 +1940,15 @@ bdev_nvme_library_fini(void)
 			}
 		}
 
 		pthread_mutex_lock(&g_bdev_nvme_mutex);
+		nvme_bdev_ctrlr->destruct = true;
+		if (nvme_bdev_ctrlr->ref == 0) {
+			pthread_mutex_unlock(&g_bdev_nvme_mutex);
+			nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
+			pthread_mutex_lock(&g_bdev_nvme_mutex);
+		}
 	}
 
 	pthread_mutex_unlock(&g_bdev_nvme_mutex);
 }
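
As a note on the shutdown path above: the fini loop and
nvme_ctrlr_depopulate_namespace_done() now apply the same rule, namely
that whichever path observes ref == 0 after destruct has been requested
performs the actual destruction.  A condensed sketch of that invariant
(the helper below is hypothetical, not part of this patch):

/* Hypothetical helper stating the rule both paths follow: the caller
 * that sees ref == 0 with destruct set is responsible for invoking
 * nvme_bdev_ctrlr_destruct().  g_bdev_nvme_mutex must be held. */
static bool
nvme_bdev_ctrlr_can_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
{
	return nvme_bdev_ctrlr->destruct && nvme_bdev_ctrlr->ref == 0;
}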


@@ -1400,6 +1400,8 @@ bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *ns)
 	free(ns->type_ctx);
 	ns->populated = false;
 	ns->type_ctx = NULL;
+
+	nvme_ctrlr_depopulate_namespace_done(ns->ctrlr);
 }
 
 int


@@ -141,6 +141,7 @@ struct nvme_io_channel {
 void nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
 					struct nvme_bdev_ns *ns, int rc);
+void nvme_ctrlr_depopulate_namespace_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get(const struct spdk_nvme_transport_id *trid);
 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get_by_name(const char *name);


@@ -59,6 +59,7 @@ DEFINE_STUB(spdk_bdev_push_media_events, int, (struct spdk_bdev *bdev,
 		const struct spdk_bdev_media_event *events,
 		size_t num_events), 0);
 DEFINE_STUB_V(spdk_bdev_notify_media_management, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(nvme_ctrlr_depopulate_namespace_done, (struct nvme_bdev_ctrlr *ctrlr));
 
 struct nvme_request {
 	spdk_nvme_cmd_cb cb_fn;