vhost/nvme: generic cleanup

* don't iterate through g_nvme_ctrlrs when it's unnecessary
* fix up a potential deadlock on session-stop error
  (which can't practically happen unless the SPDK generic
   vhost layer is malfunctioning)
* add a FIXME note to wait for pending I/Os before putting
  bdev io channels and stopping the vhost pollers.

Change-Id: I576c4771f51e432fbbab244fd1b91668436004bf
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448224
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Darek Stojaczyk 2019-03-17 00:20:49 +01:00 committed by Jim Harris
parent 3b760a4d09
commit b4abd4c9d9

View File

@ -1156,14 +1156,13 @@ static int
destroy_device_poller_cb(void *arg) destroy_device_poller_cb(void *arg)
{ {
struct spdk_vhost_nvme_dev *nvme = arg; struct spdk_vhost_nvme_dev *nvme = arg;
struct spdk_vhost_nvme_dev *dev, *tmp;
struct spdk_vhost_nvme_ns *ns_dev; struct spdk_vhost_nvme_ns *ns_dev;
uint32_t i; uint32_t i;
SPDK_DEBUGLOG(SPDK_LOG_VHOST_NVME, "Destroy device poller callback\n"); SPDK_DEBUGLOG(SPDK_LOG_VHOST_NVME, "Destroy device poller callback\n");
TAILQ_FOREACH_SAFE(dev, &g_nvme_ctrlrs, tailq, tmp) { /* FIXME wait for pending I/Os to complete */
if (dev == nvme) {
for (i = 0; i < nvme->num_ns; i++) { for (i = 0; i < nvme->num_ns; i++) {
ns_dev = &nvme->ns[i]; ns_dev = &nvme->ns[i];
if (ns_dev->bdev_io_channel) { if (ns_dev->bdev_io_channel) {
@ -1180,8 +1179,6 @@ destroy_device_poller_cb(void *arg)
nvme->dbbuf_dbs = NULL; nvme->dbbuf_dbs = NULL;
nvme->dbbuf_eis = NULL; nvme->dbbuf_eis = NULL;
nvme->dataplane_started = false; nvme->dataplane_started = false;
}
}
spdk_poller_unregister(&nvme->destroy_ctx.poller); spdk_poller_unregister(&nvme->destroy_ctx.poller);
spdk_vhost_session_event_done(nvme->vsession, 0); spdk_vhost_session_event_done(nvme->vsession, 0);
@ -1196,6 +1193,7 @@ spdk_vhost_nvme_stop_cb(struct spdk_vhost_dev *vdev,
struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev); struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev);
if (nvme == NULL) { if (nvme == NULL) {
spdk_vhost_session_event_done(vsession, -1);
return -1; return -1;
} }
@ -1422,7 +1420,6 @@ int
spdk_vhost_nvme_dev_remove(struct spdk_vhost_dev *vdev) spdk_vhost_nvme_dev_remove(struct spdk_vhost_dev *vdev)
{ {
struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev); struct spdk_vhost_nvme_dev *nvme = to_nvme_dev(vdev);
struct spdk_vhost_nvme_dev *dev, *tmp;
struct spdk_vhost_nvme_ns *ns; struct spdk_vhost_nvme_ns *ns;
int rc; int rc;
uint32_t i; uint32_t i;
@ -1431,17 +1428,13 @@ spdk_vhost_nvme_dev_remove(struct spdk_vhost_dev *vdev)
return -EINVAL; return -EINVAL;
} }
TAILQ_FOREACH_SAFE(dev, &g_nvme_ctrlrs, tailq, tmp) { TAILQ_REMOVE(&g_nvme_ctrlrs, nvme, tailq);
if (dev == nvme) {
TAILQ_REMOVE(&g_nvme_ctrlrs, dev, tailq);
for (i = 0; i < nvme->num_ns; i++) { for (i = 0; i < nvme->num_ns; i++) {
ns = &nvme->ns[i]; ns = &nvme->ns[i];
if (ns->active_ns) { if (ns->active_ns) {
spdk_vhost_nvme_deactive_ns(ns); spdk_vhost_nvme_deactive_ns(ns);
} }
} }
}
}
rc = spdk_vhost_dev_unregister(vdev); rc = spdk_vhost_dev_unregister(vdev);
if (rc != 0) { if (rc != 0) {