From bb726d516b603a7af215f881dbe701f404858bfc Mon Sep 17 00:00:00 2001
From: GangCao
Date: Tue, 15 Nov 2016 01:54:52 -0500
Subject: [PATCH] nvme: add multi-process support

This version of multi-process support requires DPDK 16.11 to be built in.

Change-Id: I3352944516f327800b4bd640347afc6127d82ed4
Signed-off-by: GangCao
---
 autotest.sh                                   |   3 +
 lib/nvme/nvme.c                               | 151 ++++++++++++++----
 lib/nvme/nvme_ctrlr.c                         |  95 ++++++++++-
 lib/nvme/nvme_internal.h                      |   7 +-
 lib/nvme/nvme_pcie.c                          |   8 +-
 test/lib/nvme/nvme.sh                         |  22 +++
 test/lib/nvme/nvmemp.sh                       |  89 +++++++++++
 .../nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c  |   7 +
 8 files changed, 339 insertions(+), 43 deletions(-)
 create mode 100755 test/lib/nvme/nvmemp.sh

diff --git a/autotest.sh b/autotest.sh
index 03ceada11..a705d835f 100755
--- a/autotest.sh
+++ b/autotest.sh
@@ -78,6 +78,9 @@ timing_enter lib
 run_test test/lib/bdev/blockdev.sh
 run_test test/lib/event/event.sh
 run_test test/lib/nvme/nvme.sh
+if [ $RUN_NIGHTLY -eq 1 ]; then
+	run_test test/lib/nvme/nvmemp.sh
+fi
 run_test test/lib/nvmf/nvmf.sh
 run_test test/lib/env/env.sh
 run_test test/lib/ioat/ioat.sh

diff --git a/lib/nvme/nvme.c b/lib/nvme/nvme.c
index 7dd1f8d71..df3f53e52 100644
--- a/lib/nvme/nvme.c
+++ b/lib/nvme/nvme.c
@@ -35,18 +35,13 @@
 #include "nvme_internal.h"
 #include "nvme_uevent.h"
 
-struct nvme_driver _g_nvme_driver = {
-	.lock = PTHREAD_MUTEX_INITIALIZER,
-	.hotplug_fd = -1,
-	.init_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.init_ctrlrs),
-	.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(_g_nvme_driver.attached_ctrlrs),
-	.request_mempool = NULL,
-	.initialized = false,
-};
+#define SPDK_NVME_DRIVER_NAME "spdk_nvme_driver"
 
-struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+struct nvme_driver *g_spdk_nvme_driver;
 
-int32_t spdk_nvme_retry_count;
+int32_t spdk_nvme_retry_count;
+
+static int hotplug_fd = -1;
 
 struct spdk_nvme_ctrlr *
 nvme_attach(enum spdk_nvme_transport transport,
@@ -233,6 +228,91 @@ nvme_mutex_init_shared(pthread_mutex_t *mtx)
 	return rc;
 }
 
+static int
+nvme_driver_init(void)
+{
+	int ret = 0;
+	/* Any socket ID */
+	int socket_id = -1;
+
+	/*
+	 * Only one thread from one process will do this driver init work.
+	 * The primary process will reserve the shared memory and do the
+	 * initialization.
+	 * The secondary process will look up the existing reserved memory.
+	 */
+	if (spdk_process_is_primary()) {
+		/* The unique named memzone has already been reserved. */
+		if (g_spdk_nvme_driver != NULL) {
+			assert(g_spdk_nvme_driver->initialized == true);
+
+			return 0;
+		} else {
+			g_spdk_nvme_driver = spdk_memzone_reserve(SPDK_NVME_DRIVER_NAME,
+					sizeof(struct nvme_driver), socket_id, 0);
+		}
+
+		if (g_spdk_nvme_driver == NULL) {
+			SPDK_ERRLOG("primary process failed to reserve memory\n");
+
+			return -1;
+		}
+	} else {
+		g_spdk_nvme_driver = spdk_memzone_lookup(SPDK_NVME_DRIVER_NAME);
+
+		/* The unique named memzone was already reserved by the primary process. */
+		if (g_spdk_nvme_driver != NULL) {
+			/*
+			 * Wait for the nvme driver to finish initializing.
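+			 * The primary process sets "initialized" to true only
+			 * after nvme_init_controllers() has attached every
+			 * controller it found, so once this loop exits the
+			 * shared attached_ctrlrs list is complete and safe
+			 * for this secondary process to walk.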
+			 */
+			while (g_spdk_nvme_driver->initialized == false) {
+				nvme_delay(1000);
+			}
+		} else {
+			SPDK_ERRLOG("primary process is not started yet\n");
+
+			return -1;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * At this moment, only one thread from the primary process will do
+	 * the g_spdk_nvme_driver initialization
+	 */
+	assert(spdk_process_is_primary());
+
+	ret = nvme_mutex_init_shared(&g_spdk_nvme_driver->lock);
+	if (ret != 0) {
+		SPDK_ERRLOG("failed to initialize mutex\n");
+		spdk_memzone_free(SPDK_NVME_DRIVER_NAME);
+		return ret;
+	}
+
+	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
+
+	g_spdk_nvme_driver->initialized = false;
+
+	TAILQ_INIT(&g_spdk_nvme_driver->init_ctrlrs);
+	TAILQ_INIT(&g_spdk_nvme_driver->attached_ctrlrs);
+
+	g_spdk_nvme_driver->request_mempool = spdk_mempool_create("nvme_request", 8192,
+			sizeof(struct nvme_request), 128);
+	if (g_spdk_nvme_driver->request_mempool == NULL) {
+		SPDK_ERRLOG("unable to allocate pool of requests\n");
+
+		pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
+		pthread_mutex_destroy(&g_spdk_nvme_driver->lock);
+
+		spdk_memzone_free(SPDK_NVME_DRIVER_NAME);
+
+		return -1;
+	}
+
+	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
+
+	return ret;
+}
+
 int
 nvme_probe_one(enum spdk_nvme_transport transport, spdk_nvme_probe_cb probe_cb, void *cb_ctx,
 	       struct spdk_nvme_probe_info *probe_info, void *devhandle)
@@ -314,6 +394,8 @@ nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
 		}
 	}
 
+	g_spdk_nvme_driver->initialized = true;
+
 	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
 	return rc;
 }
@@ -333,32 +415,22 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
 {
 	int rc;
 	enum spdk_nvme_transport transport;
+	struct spdk_nvme_ctrlr *ctrlr;
 
-	if (!spdk_process_is_primary()) {
-		while (g_spdk_nvme_driver->initialized == false) {
-			usleep(200 * 1000);
-		}
+	rc = nvme_driver_init();
+	if (rc != 0) {
+		return rc;
 	}
 
 	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
-	if (g_spdk_nvme_driver->hotplug_fd < 0) {
-		g_spdk_nvme_driver->hotplug_fd = spdk_uevent_connect();
-		if (g_spdk_nvme_driver->hotplug_fd < 0) {
+	if (hotplug_fd < 0) {
+		hotplug_fd = spdk_uevent_connect();
+		if (hotplug_fd < 0) {
 			SPDK_ERRLOG("Failed to open uevent netlink socket\n");
 		}
 	}
 
-	if (g_spdk_nvme_driver->request_mempool == NULL) {
-		g_spdk_nvme_driver->request_mempool = spdk_mempool_create("nvme_request", 8192,
-				sizeof(struct nvme_request), -1);
-		if (g_spdk_nvme_driver->request_mempool == NULL) {
-			SPDK_ERRLOG("Unable to allocate pool of requests\n");
-			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
-			return -1;
-		}
-	}
-
 	if (!info) {
 		transport = SPDK_NVME_TRANSPORT_PCIE;
 	} else {
@@ -373,6 +445,23 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
 
 	nvme_transport_ctrlr_scan(transport, probe_cb, cb_ctx, (void *)info, NULL);
 
+	if (!spdk_process_is_primary()) {
+		TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->attached_ctrlrs, tailq) {
+			nvme_ctrlr_proc_get_ref(ctrlr);
+
+			/*
+			 * Unlock while calling attach_cb() so the user can call other functions
+			 * that may take the driver lock, like nvme_detach().
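+			 * The driver lock is taken again before moving to the
+			 * next controller on the attached_ctrlrs list.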
+			 */
+			pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
+			attach_cb(cb_ctx, &ctrlr->probe_info, ctrlr, &ctrlr->opts);
+			pthread_mutex_lock(&g_spdk_nvme_driver->lock);
+		}
+
+		pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
+		return 0;
+	}
+
 	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
 	/*
 	 * Keep going even if one or more nvme_attach() calls failed,
@@ -380,9 +469,6 @@ _spdk_nvme_probe(const struct spdk_nvme_discover_info *info, void *cb_ctx,
 	 */
 
 	rc = nvme_init_controllers(cb_ctx, attach_cb);
 
-	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
-	g_spdk_nvme_driver->initialized = true;
-	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
 
 	return rc;
 }
@@ -406,9 +492,8 @@ nvme_hotplug_monitor(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach
 	int rc = 0;
 	struct spdk_nvme_ctrlr *ctrlr;
 	struct spdk_uevent event;
-	struct nvme_driver *nvme_driver = g_spdk_nvme_driver;
 
-	while (spdk_get_uevent(nvme_driver->hotplug_fd, &event) > 0) {
+	while (spdk_get_uevent(hotplug_fd, &event) > 0) {
 		if (event.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO) {
 			if (event.action == SPDK_NVME_UEVENT_ADD) {
 				SPDK_TRACELOG(SPDK_TRACE_NVME, "add nvme address: %04x:%02x:%02x.%u\n",
@@ -455,7 +540,7 @@ int
 spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
 		spdk_nvme_remove_cb remove_cb)
 {
-	if (g_spdk_nvme_driver->hotplug_fd < 0) {
+	if (hotplug_fd < 0) {
 		return _spdk_nvme_probe(NULL, cb_ctx, probe_cb, attach_cb, remove_cb);
 	} else {
 		return nvme_hotplug_monitor(cb_ctx, probe_cb, attach_cb, remove_cb);

diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c
index e7e96f498..80657cbce 100644
--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -84,6 +84,46 @@ spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts)
 	strncpy(opts->hostnqn, DEFAULT_HOSTNQN, sizeof(opts->hostnqn));
 }
 
+/**
+ * This function will be called when the process allocates the IO qpair.
+ * Note: the ctrlr_lock must be held when calling this function.
+ */
+static void
+nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+	struct spdk_nvme_ctrlr_process	*active_proc;
+	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
+	pid_t				pid = getpid();
+
+	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
+		if (active_proc->pid == pid) {
+			TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair,
+					per_process_tailq);
+			break;
+		}
+	}
+}
+
+/**
+ * This function will be called when the process frees the IO qpair.
+ * Note: the ctrlr_lock must be held when calling this function.
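+ * Only the allocated_io_qpairs list of the calling process is searched,
+ * keyed on getpid(), so a process can only remove qpairs that it
+ * allocated itself.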
+ */
+static void
+nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+	struct spdk_nvme_ctrlr_process	*active_proc;
+	struct spdk_nvme_ctrlr		*ctrlr = qpair->ctrlr;
+	pid_t				pid = getpid();
+
+	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
+		if (active_proc->pid == pid) {
+			TAILQ_REMOVE(&active_proc->allocated_io_qpairs, qpair,
+					per_process_tailq);
+			break;
+		}
+	}
+}
+
 struct spdk_nvme_qpair *
 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 		enum spdk_nvme_qprio qprio)
@@ -132,6 +172,8 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
 
+	nvme_ctrlr_proc_add_io_qpair(qpair);
+
 	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
 
 	return qpair;
@@ -150,6 +192,8 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
 
 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
 
+	nvme_ctrlr_proc_remove_io_qpair(qpair);
+
 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
 	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
 
@@ -867,16 +911,38 @@ nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
 	STAILQ_INIT(&ctrlr_proc->active_reqs);
 	ctrlr_proc->devhandle = devhandle;
 	ctrlr_proc->ref = 0;
+	TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
 
 	TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
 
 	return 0;
 }
 
+/**
+ * This function will be called when the process detaches the controller.
+ * Note: the ctrlr_lock must be held when calling this function.
+ */
+static void
+nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
+		struct spdk_nvme_ctrlr_process *proc)
+{
+	struct spdk_nvme_qpair	*qpair, *tmp_qpair;
+
+	assert(STAILQ_EMPTY(&proc->active_reqs));
+
+	TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
+		spdk_nvme_ctrlr_free_io_qpair(qpair);
+	}
+
+	TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
+
+	spdk_free(proc);
+}
+
 /**
  * This function will be called when the process exited unexpectedly
  * in order to free any incomplete nvme request and allocated memory.
- * Note: the ctrl_lock must be held when calling this function.
+ * Note: the ctrlr_lock must be held when calling this function.
  */
 static void
 nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
@@ -918,12 +984,13 @@ nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
  * This function will be called when any other process attaches or
  * detaches the controller in order to cleanup those unexpectedly
  * terminated processes.
- * Note: the ctrl_lock must be held when calling this function.
+ * Note: the ctrlr_lock must be held when calling this function.
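+ * Returns the number of processes that are still alive, so the caller
+ * can tell whether it is the last remaining user of the controller.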
  */
-static void
+static int
 nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
 {
 	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
+	int				active_proc_count = 0;
 
 	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
 		if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
@@ -932,8 +999,12 @@ nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
 
 			TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
 			nvme_ctrlr_cleanup_process(active_proc);
+		} else {
+			active_proc_count++;
 		}
 	}
+
+	return active_proc_count;
 }
 
 void
@@ -959,17 +1030,27 @@ nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
 void
 nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
 {
-	struct spdk_nvme_ctrlr_process	*active_proc;
+	struct spdk_nvme_ctrlr_process	*active_proc, *tmp;
 	pid_t				pid = getpid();
+	int				proc_count;
 
 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
 
-	nvme_ctrlr_remove_inactive_proc(ctrlr);
+	proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
 
-	TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
+	TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
 		if (active_proc->pid == pid) {
 			active_proc->ref--;
 			assert(active_proc->ref >= 0);
+
+			/*
+			 * The last active process will be removed at the end of
+			 * the destruction of the controller.
+			 */
+			if (active_proc->ref == 0 && proc_count != 1) {
+				nvme_ctrlr_remove_process(ctrlr, active_proc);
+			}
+
 			break;
 		}
 	}
@@ -1245,8 +1326,6 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 
 	pthread_mutex_destroy(&ctrlr->ctrlr_lock);
 
-	nvme_ctrlr_free_processes(ctrlr);
-
 	nvme_transport_ctrlr_destruct(ctrlr);
 }

diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index b1dde21d4..1e239448b 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -262,6 +262,9 @@ struct spdk_nvme_qpair {
 
 	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
 	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;
+
+	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
+	TAILQ_ENTRY(spdk_nvme_qpair)	per_process_tailq;
 };
 
 struct spdk_nvme_ns {
@@ -328,6 +331,9 @@ struct spdk_nvme_ctrlr_process {
 
 	/** Reference to track the number of attachment to this controller. */
 	int				ref;
+
+	/** Allocated IO qpairs */
+	TAILQ_HEAD(, spdk_nvme_qpair)	allocated_io_qpairs;
 };
 
 /*
@@ -417,7 +423,6 @@ struct spdk_nvme_ctrlr {
 
 struct nvme_driver {
 	pthread_mutex_t	lock;
-	int		hotplug_fd;
 	TAILQ_HEAD(, spdk_nvme_ctrlr)	init_ctrlrs;
 	TAILQ_HEAD(, spdk_nvme_ctrlr)	attached_ctrlrs;
 	struct spdk_mempool		*request_mempool;

diff --git a/lib/nvme/nvme_pcie.c b/lib/nvme/nvme_pcie.c
index 682e0afbb..313b58718 100644
--- a/lib/nvme/nvme_pcie.c
+++ b/lib/nvme/nvme_pcie.c
@@ -528,6 +528,7 @@ pcie_nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
 	struct spdk_nvme_probe_info probe_info = {};
 	struct nvme_pcie_enum_ctx *enum_ctx = ctx;
 	struct spdk_nvme_ctrlr *ctrlr;
+	int rc = 0;
 
 	probe_info.pci_addr = spdk_pci_device_get_addr(pci_dev);
 	probe_info.pci_id = spdk_pci_device_get_id(pci_dev);
@@ -539,7 +540,10 @@
 		 * same controller.
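+		 * A secondary process may get here for a controller that the
+		 * primary process has already attached; in that case, register
+		 * the calling process on the existing controller instead of
+		 * simply skipping it.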
 		 */
 		if (spdk_pci_addr_compare(&probe_info.pci_addr, &ctrlr->probe_info.pci_addr) == 0) {
-			return 0;
+			if (!spdk_process_is_primary()) {
+				rc = nvme_ctrlr_add_process(ctrlr, pci_dev);
+			}
+			return rc;
 		}
 	}
 
@@ -676,6 +680,8 @@ nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 		nvme_pcie_qpair_destroy(ctrlr->adminq);
 	}
 
+	nvme_ctrlr_free_processes(ctrlr);
+
 	nvme_pcie_ctrlr_free_bars(pctrlr);
 	spdk_pci_device_detach(pctrlr->devhandle);
 	spdk_free(pctrlr);

diff --git a/test/lib/nvme/nvme.sh b/test/lib/nvme/nvme.sh
index c6326b632..ee086134e 100755
--- a/test/lib/nvme/nvme.sh
+++ b/test/lib/nvme/nvme.sh
@@ -60,6 +60,28 @@ timing_enter arbitration
 $rootdir/examples/nvme/arbitration/arbitration -t 3
 timing_exit arbitration
 
+if [ $(uname -s) = Linux ]; then
+	timing_enter multi_process
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 10 &
+	pid=$!
+	sleep 3
+	$rootdir/examples/nvme/perf/perf -q 1 -w randread -s 4096 -t 10 &
+	sleep 1
+	kill -9 $!
+	count=0
+	while [ $count -le 2 ]; do
+		$rootdir/examples/nvme/perf/perf -q 1 -w read -s 4096 -t 1
+		count=$(( $count + 1 ))
+	done
+	count=0
+	while [ $count -le 1 ]; do
+		$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 1 &
+		count=$(( $count + 1 ))
+	done
+	wait $pid
+	timing_exit multi_process
+fi
+
 #Now test nvme reset function
 timing_enter reset
 $testdir/reset/reset -q 64 -w write -s 4096 -t 2

diff --git a/test/lib/nvme/nvmemp.sh b/test/lib/nvme/nvmemp.sh
new file mode 100755
index 000000000..369666490
--- /dev/null
+++ b/test/lib/nvme/nvmemp.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+
+set -e
+
+testdir=$(readlink -f $(dirname $0))
+rootdir="$testdir/../../.."
+source $rootdir/scripts/autotest_common.sh
+
+if [ $(uname -s) = Linux ]; then
+	timing_enter nvme_mp
+
+	timing_enter mp_func_test
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 5 &
+	sleep 3
+	$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 1
+	wait $!
+	timing_exit mp_func_test
+
+	timing_enter mp_fault_test
+	timing_enter mp_fault_test_1
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 5 &
+	sleep 3
+	$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 3
+	sleep 1
+	kill -9 $!
+	timing_exit mp_fault_test_1
+
+	timing_enter mp_fault_test_2
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 7 &
+	pid=$!
+	sleep 3
+	$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 3 &
+	sleep 2
+	kill -9 $!
+	wait $pid
+	timing_exit mp_fault_test_2
+	timing_exit mp_fault_test
+
+	timing_enter mp_stress_test
+	timing_enter mp_stress_test_1
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 10 &
+	sleep 3
+	count=0
+	while [ $count -le 4 ]; do
+		$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 1
+		count=$(( $count + 1 ))
+	done
+	wait $!
+	timing_exit mp_stress_test_1
+
+	timing_enter mp_stress_test_2
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 15 &
+	pid=$!
+	sleep 3
+	count=0
+	while [ $count -le 4 ]; do
+		$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 3 &
+		sleep 2
+		kill -9 $!
+		count=$(( $count + 1 ))
+	done
+	wait $pid
+	timing_exit mp_stress_test_2
+
+	timing_enter mp_stress_test_3
+	$rootdir/examples/nvme/arbitration/arbitration -s 4096 -t 10 &
+	pid=$!
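+	# Run several perf instances alongside the long-running arbitration
+	# process and let them exit on their own; unlike mp_stress_test_2,
+	# nothing is killed here.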
+	sleep 3
+	count=0
+	while [ $count -le 4 ]; do
+		$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 1 &
+		count=$(( $count + 1 ))
+	done
+	wait $pid
+	timing_exit mp_stress_test_3
+	timing_exit mp_stress_test
+
+	timing_enter mp_perf_test
+	$rootdir/examples/nvme/perf/perf -q 1 -w randread -s 4096 -t 5 -c 0x3
+	sleep 3
+
+	$rootdir/examples/nvme/perf/perf -q 1 -w randread -s 4096 -t 8 -c 0x1 &
+	sleep 3
+	$rootdir/examples/nvme/perf/perf -q 1 -w randread -s 4096 -t 3 -c 0x2
+	wait $!
+	timing_exit mp_perf_test
+
+	timing_exit nvme_mp
+fi

diff --git a/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c b/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c
index 20570cb5b..f135e157b 100644
--- a/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c
+++ b/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c
@@ -37,6 +37,13 @@
 
 #include "lib/nvme/unit/test_env.c"
 
+struct nvme_driver _g_nvme_driver = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+	.request_mempool = NULL,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
 struct nvme_request *g_request = NULL;
 
 struct spdk_uevent;