bdev/nvme: run poller more frequently while probe in progress

When we detect new devices, we would like to get them
attached as quickly as possible. Controller initialization
requires a non-trivial number of admin commands, and when
using async probe, it means that after we have detected
a new device, it will take many iterations of calling
spdk_nvme_probe_poll_async() before the controller is
fully attached.

So when we are actively probing a probe_ctx, create
a new poller that is solely responsible for probing
that context at a much higher frequency.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I72fbe3faef2d72608edb163bd87907902d7c3adc

Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5646
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: <dongx.yi@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
Jim Harris 2020-12-18 06:10:56 -07:00 committed by Tomasz Zawadzki
parent 83a544c2a9
commit acf116c6d5

View File

@ -130,6 +130,7 @@ static uint64_t g_nvme_hotplug_poll_period_us = NVME_HOTPLUG_POLL_PERIOD_DEFAULT
static bool g_nvme_hotplug_enabled = false;
static struct spdk_thread *g_bdev_nvme_init_thread;
static struct spdk_poller *g_hotplug_poller;
static struct spdk_poller *g_hotplug_probe_poller;
static struct spdk_nvme_probe_ctx *g_hotplug_probe_ctx;
static void nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
@ -1666,26 +1667,35 @@ remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr)
_nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
}
/*
 * High-frequency poller that drives an in-progress async probe context.
 * Registered only while a hotplug probe is active; once the probe
 * completes (any result other than -EAGAIN), the context is dropped and
 * this poller unregisters itself.
 */
static int
bdev_nvme_hotplug_probe(void *arg)
{
	int rc;

	rc = spdk_nvme_probe_poll_async(g_hotplug_probe_ctx);
	if (rc != -EAGAIN) {
		/* Probe finished (success or failure) — stop polling. */
		g_hotplug_probe_ctx = NULL;
		spdk_poller_unregister(&g_hotplug_probe_poller);
	}

	return SPDK_POLLER_BUSY;
}
static int
bdev_nvme_hotplug(void *arg)
{
struct spdk_nvme_transport_id trid_pcie;
int done;
if (!g_hotplug_probe_ctx) {
memset(&trid_pcie, 0, sizeof(trid_pcie));
spdk_nvme_trid_populate_transport(&trid_pcie, SPDK_NVME_TRANSPORT_PCIE);
g_hotplug_probe_ctx = spdk_nvme_probe_async(&trid_pcie, NULL,
hotplug_probe_cb, attach_cb, NULL);
if (!g_hotplug_probe_ctx) {
return SPDK_POLLER_BUSY;
}
if (g_hotplug_probe_ctx) {
return SPDK_POLLER_BUSY;
}
done = spdk_nvme_probe_poll_async(g_hotplug_probe_ctx);
if (done != -EAGAIN) {
g_hotplug_probe_ctx = NULL;
memset(&trid_pcie, 0, sizeof(trid_pcie));
spdk_nvme_trid_populate_transport(&trid_pcie, SPDK_NVME_TRANSPORT_PCIE);
g_hotplug_probe_ctx = spdk_nvme_probe_async(&trid_pcie, NULL,
hotplug_probe_cb, attach_cb, NULL);
if (g_hotplug_probe_ctx) {
assert(g_hotplug_probe_poller == NULL);
g_hotplug_probe_poller = SPDK_POLLER_REGISTER(bdev_nvme_hotplug_probe, NULL, 1000);
}
return SPDK_POLLER_BUSY;