example/nvme: Replace next pointers with TAILQ except for fio_plugin and perf

This will make the object relationship cleaner and the asynchronous
detach operation easier to implement.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I5030dc9eb8f607247f08b4524d37ec2b74826a93
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4430
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
Shuhei Matsumoto 2020-09-27 11:51:01 +09:00 committed by Tomasz Zawadzki
parent 6d87bc7a8a
commit 4c3fd22850
4 changed files with 158 additions and 256 deletions

View File

@ -45,7 +45,7 @@ struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
enum spdk_nvme_transport_type trtype; enum spdk_nvme_transport_type trtype;
struct ctrlr_entry *next; TAILQ_ENTRY(ctrlr_entry) link;
char name[1024]; char name[1024];
}; };
@ -53,7 +53,7 @@ struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns; struct spdk_nvme_ns *ns;
struct ns_entry *next; TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks; uint32_t io_size_blocks;
uint32_t num_io_requests; uint32_t num_io_requests;
uint64_t size_in_ios; uint64_t size_in_ios;
@ -71,7 +71,7 @@ struct ctrlr_worker_ctx {
uint64_t abort_failed; uint64_t abort_failed;
uint64_t current_queue_depth; uint64_t current_queue_depth;
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct ctrlr_worker_ctx *next; TAILQ_ENTRY(ctrlr_worker_ctx) link;
}; };
struct ns_worker_ctx { struct ns_worker_ctx {
@ -85,7 +85,7 @@ struct ns_worker_ctx {
bool is_draining; bool is_draining;
struct spdk_nvme_qpair *qpair; struct spdk_nvme_qpair *qpair;
struct ctrlr_worker_ctx *ctrlr_ctx; struct ctrlr_worker_ctx *ctrlr_ctx;
struct ns_worker_ctx *next; TAILQ_ENTRY(ns_worker_ctx) link;
}; };
struct perf_task { struct perf_task {
@ -94,18 +94,18 @@ struct perf_task {
}; };
struct worker_thread { struct worker_thread {
struct ns_worker_ctx *ns_ctx; TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
struct ctrlr_worker_ctx *ctrlr_ctx; TAILQ_HEAD(, ctrlr_worker_ctx) ctrlr_ctx;
struct worker_thread *next; TAILQ_ENTRY(worker_thread) link;
unsigned lcore; unsigned lcore;
}; };
static const char *g_workload_type = "read"; static const char *g_workload_type = "read";
static struct ctrlr_entry *g_controllers; static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static struct ns_entry *g_namespaces; static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static int g_num_namespaces; static int g_num_namespaces;
static struct worker_thread *g_workers; static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static int g_num_workers; static int g_num_workers = 0;
static uint32_t g_master_core; static uint32_t g_master_core;
static int g_abort_interval = 1; static int g_abort_interval = 1;
@ -248,19 +248,17 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
build_nvme_ns_name(entry->name, sizeof(entry->name), ctrlr, spdk_nvme_ns_get_id(ns)); build_nvme_ns_name(entry->name, sizeof(entry->name), ctrlr, spdk_nvme_ns_get_id(ns));
g_num_namespaces++; g_num_namespaces++;
entry->next = g_namespaces; TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
g_namespaces = entry;
} }
static void static void
unregister_namespaces(void) unregister_namespaces(void)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry, *tmp;
while (entry) { TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
struct ns_entry *next = entry->next; TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry); free(entry);
entry = next;
} }
} }
@ -280,8 +278,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
entry->ctrlr = ctrlr; entry->ctrlr = ctrlr;
entry->trtype = trid_entry->trid.trtype; entry->trtype = trid_entry->trid.trtype;
entry->next = g_controllers; TAILQ_INSERT_TAIL(&g_controllers, entry, link);
g_controllers = entry;
if (trid_entry->nsid == 0) { if (trid_entry->nsid == 0) {
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
@ -455,8 +452,7 @@ work_fn(void *arg)
uint32_t unfinished_ctx; uint32_t unfinished_ctx;
/* Allocate queue pair for each namespace. */ /* Allocate queue pair for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
ns_entry = ns_ctx->entry; ns_entry = ns_ctx->entry;
spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts)); spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
@ -469,34 +465,26 @@ work_fn(void *arg)
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n"); fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return 1; return 1;
} }
ns_ctx = ns_ctx->next;
} }
tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate; tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
/* Submit initial I/O for each namespace. */ /* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
submit_io(ns_ctx, g_queue_depth); submit_io(ns_ctx, g_queue_depth);
ns_ctx = ns_ctx->next;
} }
while (1) { while (1) {
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0); spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
ns_ctx = ns_ctx->next;
} }
if (worker->lcore == g_master_core) { if (worker->lcore == g_master_core) {
ctrlr_ctx = worker->ctrlr_ctx; TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
while (ctrlr_ctx) {
/* Hold mutex to guard ctrlr_ctx->current_queue_depth. */ /* Hold mutex to guard ctrlr_ctx->current_queue_depth. */
pthread_mutex_lock(&ctrlr_ctx->mutex); pthread_mutex_lock(&ctrlr_ctx->mutex);
spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr); spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
pthread_mutex_unlock(&ctrlr_ctx->mutex); pthread_mutex_unlock(&ctrlr_ctx->mutex);
ctrlr_ctx = ctrlr_ctx->next;
} }
} }
@ -508,8 +496,7 @@ work_fn(void *arg)
do { do {
unfinished_ctx = 0; unfinished_ctx = 0;
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
if (!ns_ctx->is_draining) { if (!ns_ctx->is_draining) {
ns_ctx->is_draining = true; ns_ctx->is_draining = true;
} }
@ -521,7 +508,6 @@ work_fn(void *arg)
unfinished_ctx++; unfinished_ctx++;
} }
} }
ns_ctx = ns_ctx->next;
} }
} while (unfinished_ctx > 0); } while (unfinished_ctx > 0);
@ -529,8 +515,7 @@ work_fn(void *arg)
do { do {
unfinished_ctx = 0; unfinished_ctx = 0;
ctrlr_ctx = worker->ctrlr_ctx; TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
while (ctrlr_ctx != NULL) {
pthread_mutex_lock(&ctrlr_ctx->mutex); pthread_mutex_lock(&ctrlr_ctx->mutex);
if (ctrlr_ctx->current_queue_depth > 0) { if (ctrlr_ctx->current_queue_depth > 0) {
spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr); spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
@ -539,7 +524,6 @@ work_fn(void *arg)
} }
} }
pthread_mutex_unlock(&ctrlr_ctx->mutex); pthread_mutex_unlock(&ctrlr_ctx->mutex);
ctrlr_ctx = ctrlr_ctx->next;
} }
} while (unfinished_ctx > 0); } while (unfinished_ctx > 0);
} }
@ -812,9 +796,6 @@ register_workers(void)
uint32_t i; uint32_t i;
struct worker_thread *worker; struct worker_thread *worker;
g_workers = NULL;
g_num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) { SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker)); worker = calloc(1, sizeof(*worker));
if (worker == NULL) { if (worker == NULL) {
@ -822,9 +803,10 @@ register_workers(void)
return -1; return -1;
} }
TAILQ_INIT(&worker->ns_ctx);
TAILQ_INIT(&worker->ctrlr_ctx);
worker->lcore = i; worker->lcore = i;
worker->next = g_workers; TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_workers = worker;
g_num_workers++; g_num_workers++;
} }
@ -834,27 +816,23 @@ register_workers(void)
static void static void
unregister_workers(void) unregister_workers(void)
{ {
struct worker_thread *worker = g_workers; struct worker_thread *worker, *tmp_worker;
struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
struct ctrlr_worker_ctx *ctrlr_ctx, *tmp_ctrlr_ctx;
/* Free namespace context and worker thread */ /* Free namespace context and worker thread */
while (worker) { TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
struct worker_thread *next_worker = worker->next; TAILQ_REMOVE(&g_workers, worker, link);
struct ns_worker_ctx *ns_ctx = worker->ns_ctx;
while (ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next;
TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
printf("NS: %s I/O completed: %lu, failed: %lu\n", printf("NS: %s I/O completed: %lu, failed: %lu\n",
ns_ctx->entry->name, ns_ctx->io_completed, ns_ctx->io_failed); ns_ctx->entry->name, ns_ctx->io_completed, ns_ctx->io_failed);
free(ns_ctx); free(ns_ctx);
ns_ctx = next_ns_ctx;
} }
struct ctrlr_worker_ctx *ctrlr_ctx = worker->ctrlr_ctx; TAILQ_FOREACH_SAFE(ctrlr_ctx, &worker->ctrlr_ctx, link, tmp_ctrlr_ctx) {
TAILQ_REMOVE(&worker->ctrlr_ctx, ctrlr_ctx, link);
while (ctrlr_ctx) {
struct ctrlr_worker_ctx *next_ctrlr_ctx = ctrlr_ctx->next;
printf("CTRLR: %s abort submitted %lu, failed to submit %lu\n", printf("CTRLR: %s abort submitted %lu, failed to submit %lu\n",
ctrlr_ctx->entry->name, ctrlr_ctx->abort_submitted, ctrlr_ctx->entry->name, ctrlr_ctx->abort_submitted,
ctrlr_ctx->abort_submit_failed); ctrlr_ctx->abort_submit_failed);
@ -862,11 +840,9 @@ unregister_workers(void)
ctrlr_ctx->successful_abort, ctrlr_ctx->unsuccessful_abort, ctrlr_ctx->successful_abort, ctrlr_ctx->unsuccessful_abort,
ctrlr_ctx->abort_failed); ctrlr_ctx->abort_failed);
free(ctrlr_ctx); free(ctrlr_ctx);
ctrlr_ctx = next_ctrlr_ctx;
} }
free(worker); free(worker);
worker = next_worker;
} }
} }
@ -931,35 +907,33 @@ register_controllers(void)
static void static void
unregister_controllers(void) unregister_controllers(void)
{ {
struct ctrlr_entry *entry = g_controllers; struct ctrlr_entry *entry, *tmp;
while (entry) { TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
struct ctrlr_entry *next = entry->next; TAILQ_REMOVE(&g_controllers, entry, link);
spdk_nvme_detach(entry->ctrlr); spdk_nvme_detach(entry->ctrlr);
free(entry); free(entry);
entry = next;
} }
} }
static int static int
associate_master_worker_with_ctrlr(void) associate_master_worker_with_ctrlr(void)
{ {
struct ctrlr_entry *entry = g_controllers; struct ctrlr_entry *entry;
struct worker_thread *worker = g_workers; struct worker_thread *worker;
struct ctrlr_worker_ctx *ctrlr_ctx; struct ctrlr_worker_ctx *ctrlr_ctx;
while (worker) { TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore == g_master_core) { if (worker->lcore == g_master_core) {
break; break;
} }
worker = worker->next;
} }
if (!worker) { if (!worker) {
return -1; return -1;
} }
while (entry) { TAILQ_FOREACH(entry, &g_controllers, link) {
ctrlr_ctx = calloc(1, sizeof(struct ctrlr_worker_ctx)); ctrlr_ctx = calloc(1, sizeof(struct ctrlr_worker_ctx));
if (!ctrlr_ctx) { if (!ctrlr_ctx) {
return -1; return -1;
@ -968,10 +942,8 @@ associate_master_worker_with_ctrlr(void)
pthread_mutex_init(&ctrlr_ctx->mutex, NULL); pthread_mutex_init(&ctrlr_ctx->mutex, NULL);
ctrlr_ctx->entry = entry; ctrlr_ctx->entry = entry;
ctrlr_ctx->ctrlr = entry->ctrlr; ctrlr_ctx->ctrlr = entry->ctrlr;
ctrlr_ctx->next = worker->ctrlr_ctx;
worker->ctrlr_ctx = ctrlr_ctx;
entry = entry->next; TAILQ_INSERT_TAIL(&worker->ctrlr_ctx, ctrlr_ctx, link);
} }
return 0; return 0;
@ -980,27 +952,23 @@ associate_master_worker_with_ctrlr(void)
static struct ctrlr_worker_ctx * static struct ctrlr_worker_ctx *
get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr) get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr)
{ {
struct worker_thread *worker = g_workers; struct worker_thread *worker;
struct ctrlr_worker_ctx *ctrlr_ctx; struct ctrlr_worker_ctx *ctrlr_ctx;
while (worker != NULL) { TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore == g_master_core) { if (worker->lcore == g_master_core) {
break; break;
} }
worker = worker->next;
} }
if (!worker) { if (!worker) {
return NULL; return NULL;
} }
ctrlr_ctx = worker->ctrlr_ctx; TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
while (ctrlr_ctx != NULL) {
if (ctrlr_ctx->ctrlr == ctrlr) { if (ctrlr_ctx->ctrlr == ctrlr) {
return ctrlr_ctx; return ctrlr_ctx;
} }
ctrlr_ctx = ctrlr_ctx->next;
} }
return NULL; return NULL;
@ -1009,8 +977,8 @@ get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr)
static int static int
associate_workers_with_ns(void) associate_workers_with_ns(void)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = g_workers; struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx; struct ns_worker_ctx *ns_ctx;
int i, count; int i, count;
@ -1034,17 +1002,16 @@ associate_workers_with_ns(void)
return -1; return -1;
} }
ns_ctx->next = worker->ns_ctx; TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker->ns_ctx = ns_ctx;
worker = worker->next; worker = TAILQ_NEXT(worker, link);
if (worker == NULL) { if (worker == NULL) {
worker = g_workers; worker = TAILQ_FIRST(&g_workers);
} }
entry = entry->next; entry = TAILQ_NEXT(entry, link);
if (entry == NULL) { if (entry == NULL) {
entry = g_namespaces; entry = TAILQ_FIRST(&g_namespaces);
} }
} }
@ -1117,15 +1084,13 @@ int main(int argc, char **argv)
/* Launch all of the slave workers */ /* Launch all of the slave workers */
g_master_core = spdk_env_get_current_core(); g_master_core = spdk_env_get_current_core();
master_worker = NULL; master_worker = NULL;
worker = g_workers; TAILQ_FOREACH(worker, &g_workers, link) {
while (worker != NULL) {
if (worker->lcore != g_master_core) { if (worker->lcore != g_master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker); spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else { } else {
assert(master_worker == NULL); assert(master_worker == NULL);
master_worker = worker; master_worker = worker;
} }
worker = worker->next;
} }
assert(master_worker != NULL); assert(master_worker != NULL);

View File

@ -41,7 +41,7 @@
struct ctrlr_entry { struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_intel_rw_latency_page latency_page; struct spdk_nvme_intel_rw_latency_page latency_page;
struct ctrlr_entry *next; TAILQ_ENTRY(ctrlr_entry) link;
char name[1024]; char name[1024];
}; };
@ -51,7 +51,7 @@ struct ns_entry {
struct spdk_nvme_ns *ns; struct spdk_nvme_ns *ns;
} nvme; } nvme;
struct ns_entry *next; TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks; uint32_t io_size_blocks;
uint64_t size_in_ios; uint64_t size_in_ios;
char name[1024]; char name[1024];
@ -64,7 +64,7 @@ struct ns_worker_ctx {
uint64_t offset_in_ios; uint64_t offset_in_ios;
bool is_draining; bool is_draining;
struct spdk_nvme_qpair *qpair; struct spdk_nvme_qpair *qpair;
struct ns_worker_ctx *next; TAILQ_ENTRY(ns_worker_ctx) link;
}; };
struct arb_task { struct arb_task {
@ -73,8 +73,8 @@ struct arb_task {
}; };
struct worker_thread { struct worker_thread {
struct ns_worker_ctx *ns_ctx; TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
struct worker_thread *next; TAILQ_ENTRY(worker_thread) link;
unsigned lcore; unsigned lcore;
enum spdk_nvme_qprio qprio; enum spdk_nvme_qprio qprio;
}; };
@ -106,9 +106,9 @@ struct feature {
static struct spdk_mempool *task_pool = NULL; static struct spdk_mempool *task_pool = NULL;
static struct ctrlr_entry *g_controllers = NULL; static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static struct ns_entry *g_namespaces = NULL; static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static struct worker_thread *g_workers = NULL; static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static struct feature features[SPDK_NVME_FEAT_ARBITRATION + 1] = {}; static struct feature features[SPDK_NVME_FEAT_ARBITRATION + 1] = {};
@ -184,8 +184,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
g_arbitration.num_namespaces++; g_arbitration.num_namespaces++;
entry->next = g_namespaces; TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
g_namespaces = entry;
} }
static void static void
@ -239,8 +238,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
entry->ctrlr = ctrlr; entry->ctrlr = ctrlr;
entry->next = g_controllers; TAILQ_INSERT_TAIL(&g_controllers, entry, link);
g_controllers = entry;
if ((g_arbitration.latency_tracking_enable != 0) && if ((g_arbitration.latency_tracking_enable != 0) &&
spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) { spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
@ -399,30 +397,25 @@ cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
static void static void
cleanup(uint32_t task_count) cleanup(uint32_t task_count)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry, *tmp_entry;
struct ns_entry *next_entry = NULL; struct worker_thread *worker, *tmp_worker;
struct worker_thread *worker = g_workers; struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
struct worker_thread *next_worker = NULL;
while (entry) { TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp_entry) {
next_entry = entry->next; TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry); free(entry);
entry = next_entry;
}; };
while (worker) { TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
struct ns_worker_ctx *ns_ctx = worker->ns_ctx; TAILQ_REMOVE(&g_workers, worker, link);
/* ns_worker_ctx is a list in the worker */ /* ns_worker_ctx is a list in the worker */
while (ns_ctx) { TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next; TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
free(ns_ctx); free(ns_ctx);
ns_ctx = next_ns_ctx;
} }
next_worker = worker->next;
free(worker); free(worker);
worker = next_worker;
}; };
if (spdk_mempool_count(task_pool) != (size_t)task_count) { if (spdk_mempool_count(task_pool) != (size_t)task_count) {
@ -437,28 +430,23 @@ work_fn(void *arg)
{ {
uint64_t tsc_end; uint64_t tsc_end;
struct worker_thread *worker = (struct worker_thread *)arg; struct worker_thread *worker = (struct worker_thread *)arg;
struct ns_worker_ctx *ns_ctx = NULL; struct ns_worker_ctx *ns_ctx;
printf("Starting thread on core %u with %s\n", worker->lcore, print_qprio(worker->qprio)); printf("Starting thread on core %u with %s\n", worker->lcore, print_qprio(worker->qprio));
/* Allocate a queue pair for each namespace. */ /* Allocate a queue pair for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
if (init_ns_worker_ctx(ns_ctx, worker->qprio) != 0) { if (init_ns_worker_ctx(ns_ctx, worker->qprio) != 0) {
printf("ERROR: init_ns_worker_ctx() failed\n"); printf("ERROR: init_ns_worker_ctx() failed\n");
return 1; return 1;
} }
ns_ctx = ns_ctx->next;
} }
tsc_end = spdk_get_ticks() + g_arbitration.time_in_sec * g_arbitration.tsc_rate; tsc_end = spdk_get_ticks() + g_arbitration.time_in_sec * g_arbitration.tsc_rate;
/* Submit initial I/O for each namespace. */ /* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
submit_io(ns_ctx, g_arbitration.queue_depth); submit_io(ns_ctx, g_arbitration.queue_depth);
ns_ctx = ns_ctx->next;
} }
while (1) { while (1) {
@ -467,10 +455,8 @@ work_fn(void *arg)
* I/O will be submitted in the io_complete callback * I/O will be submitted in the io_complete callback
* to replace each I/O that is completed. * to replace each I/O that is completed.
*/ */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
check_io(ns_ctx); check_io(ns_ctx);
ns_ctx = ns_ctx->next;
} }
if (spdk_get_ticks() > tsc_end) { if (spdk_get_ticks() > tsc_end) {
@ -478,11 +464,9 @@ work_fn(void *arg)
} }
} }
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
drain_io(ns_ctx); drain_io(ns_ctx);
cleanup_ns_worker_ctx(ns_ctx); cleanup_ns_worker_ctx(ns_ctx);
ns_ctx = ns_ctx->next;
} }
return 0; return 0;
@ -562,18 +546,14 @@ print_performance(void)
struct worker_thread *worker; struct worker_thread *worker;
struct ns_worker_ctx *ns_ctx; struct ns_worker_ctx *ns_ctx;
worker = g_workers; TAILQ_FOREACH(worker, &g_workers, link) {
while (worker) { TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
ns_ctx = worker->ns_ctx;
while (ns_ctx) {
io_per_second = (float)ns_ctx->io_completed / g_arbitration.time_in_sec; io_per_second = (float)ns_ctx->io_completed / g_arbitration.time_in_sec;
sent_all_io_in_secs = g_arbitration.io_count / io_per_second; sent_all_io_in_secs = g_arbitration.io_count / io_per_second;
printf("%-43.43s core %u: %8.2f IO/s %8.2f secs/%d ios\n", printf("%-43.43s core %u: %8.2f IO/s %8.2f secs/%d ios\n",
ns_ctx->entry->name, worker->lcore, ns_ctx->entry->name, worker->lcore,
io_per_second, sent_all_io_in_secs, g_arbitration.io_count); io_per_second, sent_all_io_in_secs, g_arbitration.io_count);
ns_ctx = ns_ctx->next;
} }
worker = worker->next;
} }
printf("========================================================\n"); printf("========================================================\n");
@ -613,8 +593,7 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
printf("%s Latency Statistics:\n", op_name); printf("%s Latency Statistics:\n", op_name);
printf("========================================================\n"); printf("========================================================\n");
ctrlr = g_controllers; TAILQ_FOREACH(ctrlr, &g_controllers, link) {
while (ctrlr) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) { if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
if (spdk_nvme_ctrlr_cmd_get_log_page( if (spdk_nvme_ctrlr_cmd_get_log_page(
ctrlr->ctrlr, log_page, ctrlr->ctrlr, log_page,
@ -633,23 +612,18 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
printf("Controller %s: %s latency statistics not supported\n", printf("Controller %s: %s latency statistics not supported\n",
ctrlr->name, op_name); ctrlr->name, op_name);
} }
ctrlr = ctrlr->next;
} }
while (g_arbitration.outstanding_commands) { while (g_arbitration.outstanding_commands) {
ctrlr = g_controllers; TAILQ_FOREACH(ctrlr, &g_controllers, link) {
while (ctrlr) {
spdk_nvme_ctrlr_process_admin_completions(ctrlr->ctrlr); spdk_nvme_ctrlr_process_admin_completions(ctrlr->ctrlr);
ctrlr = ctrlr->next;
} }
} }
ctrlr = g_controllers; TAILQ_FOREACH(ctrlr, &g_controllers, link) {
while (ctrlr) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) { if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
print_latency_page(ctrlr); print_latency_page(ctrlr);
} }
ctrlr = ctrlr->next;
} }
printf("\n"); printf("\n");
} }
@ -825,9 +799,6 @@ register_workers(void)
struct worker_thread *worker; struct worker_thread *worker;
enum spdk_nvme_qprio qprio = SPDK_NVME_QPRIO_URGENT; enum spdk_nvme_qprio qprio = SPDK_NVME_QPRIO_URGENT;
g_workers = NULL;
g_arbitration.num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) { SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker)); worker = calloc(1, sizeof(*worker));
if (worker == NULL) { if (worker == NULL) {
@ -835,9 +806,9 @@ register_workers(void)
return -1; return -1;
} }
TAILQ_INIT(&worker->ns_ctx);
worker->lcore = i; worker->lcore = i;
worker->next = g_workers; TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_workers = worker;
g_arbitration.num_workers++; g_arbitration.num_workers++;
if (g_arbitration.arbitration_mechanism == SPDK_NVME_CAP_AMS_WRR) { if (g_arbitration.arbitration_mechanism == SPDK_NVME_CAP_AMS_WRR) {
@ -895,25 +866,24 @@ register_controllers(void)
static void static void
unregister_controllers(void) unregister_controllers(void)
{ {
struct ctrlr_entry *entry = g_controllers; struct ctrlr_entry *entry, *tmp;
while (entry) { TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
struct ctrlr_entry *next = entry->next; TAILQ_REMOVE(&g_controllers, entry, link);
if (g_arbitration.latency_tracking_enable && if (g_arbitration.latency_tracking_enable &&
spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) { spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
set_latency_tracking_feature(entry->ctrlr, false); set_latency_tracking_feature(entry->ctrlr, false);
} }
spdk_nvme_detach(entry->ctrlr); spdk_nvme_detach(entry->ctrlr);
free(entry); free(entry);
entry = next;
} }
} }
static int static int
associate_workers_with_ns(void) associate_workers_with_ns(void)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = g_workers; struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx; struct ns_worker_ctx *ns_ctx;
int i, count; int i, count;
@ -933,17 +903,16 @@ associate_workers_with_ns(void)
printf("Associating %s with lcore %d\n", entry->name, worker->lcore); printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
ns_ctx->entry = entry; ns_ctx->entry = entry;
ns_ctx->next = worker->ns_ctx; TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker->ns_ctx = ns_ctx;
worker = worker->next; worker = TAILQ_NEXT(worker, link);
if (worker == NULL) { if (worker == NULL) {
worker = g_workers; worker = TAILQ_FIRST(&g_workers);
} }
entry = entry->next; entry = TAILQ_NEXT(entry, link);
if (entry == NULL) { if (entry == NULL) {
entry = g_namespaces; entry = TAILQ_FIRST(&g_namespaces);
} }
} }
@ -1128,15 +1097,13 @@ main(int argc, char **argv)
/* Launch all of the slave workers */ /* Launch all of the slave workers */
master_core = spdk_env_get_current_core(); master_core = spdk_env_get_current_core();
master_worker = NULL; master_worker = NULL;
worker = g_workers; TAILQ_FOREACH(worker, &g_workers, link) {
while (worker != NULL) {
if (worker->lcore != master_core) { if (worker->lcore != master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker); spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else { } else {
assert(master_worker == NULL); assert(master_worker == NULL);
master_worker = worker; master_worker = worker;
} }
worker = worker->next;
} }
assert(master_worker != NULL); assert(master_worker != NULL);

View File

@ -38,20 +38,20 @@
#include "spdk/env.h" #include "spdk/env.h"
struct ctrlr_entry { struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct ctrlr_entry *next; TAILQ_ENTRY(ctrlr_entry) link;
char name[1024]; char name[1024];
}; };
struct ns_entry { struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns; struct spdk_nvme_ns *ns;
struct ns_entry *next; TAILQ_ENTRY(ns_entry) link;
struct spdk_nvme_qpair *qpair; struct spdk_nvme_qpair *qpair;
}; };
static struct ctrlr_entry *g_controllers = NULL; static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static struct ns_entry *g_namespaces = NULL; static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static bool g_vmd = false; static bool g_vmd = false;
@ -72,8 +72,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
entry->ctrlr = ctrlr; entry->ctrlr = ctrlr;
entry->ns = ns; entry->ns = ns;
entry->next = g_namespaces; TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
g_namespaces = entry;
printf(" Namespace ID: %d size: %juGB\n", spdk_nvme_ns_get_id(ns), printf(" Namespace ID: %d size: %juGB\n", spdk_nvme_ns_get_id(ns),
spdk_nvme_ns_get_size(ns) / 1000000000); spdk_nvme_ns_get_size(ns) / 1000000000);
@ -162,8 +161,7 @@ hello_world(void)
int rc; int rc;
size_t sz; size_t sz;
ns_entry = g_namespaces; TAILQ_FOREACH(ns_entry, &g_namespaces, link) {
while (ns_entry != NULL) {
/* /*
* Allocate an I/O qpair that we can use to submit read/write requests * Allocate an I/O qpair that we can use to submit read/write requests
* to namespaces on the controller. NVMe controllers typically support * to namespaces on the controller. NVMe controllers typically support
@ -259,7 +257,6 @@ hello_world(void)
* pending I/O are completed before trying to free the qpair. * pending I/O are completed before trying to free the qpair.
*/ */
spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair); spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
ns_entry = ns_entry->next;
} }
} }
@ -302,8 +299,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn); snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
entry->ctrlr = ctrlr; entry->ctrlr = ctrlr;
entry->next = g_controllers; TAILQ_INSERT_TAIL(&g_controllers, entry, link);
g_controllers = entry;
/* /*
* Each controller has one or more namespaces. An NVMe namespace is basically * Each controller has one or more namespaces. An NVMe namespace is basically
@ -327,21 +323,18 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
static void static void
cleanup(void) cleanup(void)
{ {
struct ns_entry *ns_entry = g_namespaces; struct ns_entry *ns_entry, *tmp_ns_entry;
struct ctrlr_entry *ctrlr_entry = g_controllers; struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry;
while (ns_entry) { TAILQ_FOREACH_SAFE(ns_entry, &g_namespaces, link, tmp_ns_entry) {
struct ns_entry *next = ns_entry->next; TAILQ_REMOVE(&g_namespaces, ns_entry, link);
free(ns_entry); free(ns_entry);
ns_entry = next;
} }
while (ctrlr_entry) { TAILQ_FOREACH_SAFE(ctrlr_entry, &g_controllers, link, tmp_ctrlr_entry) {
struct ctrlr_entry *next = ctrlr_entry->next; TAILQ_REMOVE(&g_controllers, ctrlr_entry, link);
spdk_nvme_detach(ctrlr_entry->ctrlr); spdk_nvme_detach(ctrlr_entry->ctrlr);
free(ctrlr_entry); free(ctrlr_entry);
ctrlr_entry = next;
} }
} }
@ -418,7 +411,7 @@ int main(int argc, char **argv)
return 1; return 1;
} }
if (g_controllers == NULL) { if (TAILQ_EMPTY(&g_controllers)) {
fprintf(stderr, "no NVMe controllers found\n"); fprintf(stderr, "no NVMe controllers found\n");
cleanup(); cleanup();
return 1; return 1;

View File

@ -47,7 +47,7 @@ struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_transport_id failover_trid; struct spdk_nvme_transport_id failover_trid;
enum spdk_nvme_transport_type trtype; enum spdk_nvme_transport_type trtype;
struct ctrlr_entry *next; TAILQ_ENTRY(ctrlr_entry) link;
char name[1024]; char name[1024];
int num_resets; int num_resets;
}; };
@ -56,7 +56,7 @@ struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns; struct spdk_nvme_ns *ns;
struct ns_entry *next; TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks; uint32_t io_size_blocks;
uint32_t num_io_requests; uint32_t num_io_requests;
uint64_t size_in_ios; uint64_t size_in_ios;
@ -66,17 +66,17 @@ struct ns_entry {
}; };
struct ns_worker_ctx { struct ns_worker_ctx {
struct ns_entry *entry; struct ns_entry *entry;
uint64_t io_completed; uint64_t io_completed;
uint64_t current_queue_depth; uint64_t current_queue_depth;
uint64_t offset_in_ios; uint64_t offset_in_ios;
bool is_draining; bool is_draining;
int num_qpairs; int num_qpairs;
struct spdk_nvme_qpair **qpair; struct spdk_nvme_qpair **qpair;
int last_qpair; int last_qpair;
struct ns_worker_ctx *next; TAILQ_ENTRY(ns_worker_ctx) link;
}; };
struct perf_task { struct perf_task {
@ -86,18 +86,18 @@ struct perf_task {
}; };
struct worker_thread { struct worker_thread {
struct ns_worker_ctx *ns_ctx; TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
struct worker_thread *next; TAILQ_ENTRY(worker_thread) link;
unsigned lcore; unsigned lcore;
}; };
/* For basic reset handling. */ /* For basic reset handling. */
static int g_max_ctrlr_resets = 15; static int g_max_ctrlr_resets = 15;
static struct ctrlr_entry *g_controllers = NULL; static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static struct ns_entry *g_namespaces = NULL; static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static int g_num_namespaces = 0; static int g_num_namespaces = 0;
static struct worker_thread *g_workers = NULL; static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static int g_num_workers = 0; static int g_num_workers = 0;
static uint64_t g_tsc_rate; static uint64_t g_tsc_rate;
@ -347,19 +347,17 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
build_nvme_name(entry->name, sizeof(entry->name), ctrlr); build_nvme_name(entry->name, sizeof(entry->name), ctrlr);
g_num_namespaces++; g_num_namespaces++;
entry->next = g_namespaces; TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
g_namespaces = entry;
} }
static void static void
unregister_namespaces(void) unregister_namespaces(void)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry, *tmp;
while (entry) { TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
struct ns_entry *next = entry->next; TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry); free(entry);
entry = next;
} }
} }
@ -396,8 +394,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
entry->ctrlr = ctrlr; entry->ctrlr = ctrlr;
entry->trtype = trid_entry->trid.trtype; entry->trtype = trid_entry->trid.trtype;
entry->next = g_controllers; TAILQ_INSERT_TAIL(&g_controllers, entry, link);
g_controllers = entry;
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) { nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
@ -527,22 +524,18 @@ work_fn(void *arg)
printf("Starting thread on core %u\n", worker->lcore); printf("Starting thread on core %u\n", worker->lcore);
/* Allocate queue pairs for each namespace. */ /* Allocate queue pairs for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
if (nvme_init_ns_worker_ctx(ns_ctx) != 0) { if (nvme_init_ns_worker_ctx(ns_ctx) != 0) {
printf("ERROR: init_ns_worker_ctx() failed\n"); printf("ERROR: init_ns_worker_ctx() failed\n");
return 1; return 1;
} }
ns_ctx = ns_ctx->next;
} }
tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate; tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
/* Submit initial I/O for each namespace. */ /* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
submit_io(ns_ctx, g_queue_depth); submit_io(ns_ctx, g_queue_depth);
ns_ctx = ns_ctx->next;
} }
while (1) { while (1) {
@ -551,10 +544,8 @@ work_fn(void *arg)
* I/O will be submitted in the io_complete callback * I/O will be submitted in the io_complete callback
* to replace each I/O that is completed. * to replace each I/O that is completed.
*/ */
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
check_io(ns_ctx); check_io(ns_ctx);
ns_ctx = ns_ctx->next;
} }
if (spdk_get_ticks() > tsc_end) { if (spdk_get_ticks() > tsc_end) {
@ -565,8 +556,7 @@ work_fn(void *arg)
/* drain the io of each ns_ctx in round robin to make the fairness */ /* drain the io of each ns_ctx in round robin to make the fairness */
do { do {
unfinished_ns_ctx = 0; unfinished_ns_ctx = 0;
ns_ctx = worker->ns_ctx; TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
while (ns_ctx != NULL) {
/* first time will enter into this if case */ /* first time will enter into this if case */
if (!ns_ctx->is_draining) { if (!ns_ctx->is_draining) {
ns_ctx->is_draining = true; ns_ctx->is_draining = true;
@ -580,7 +570,6 @@ work_fn(void *arg)
unfinished_ns_ctx++; unfinished_ns_ctx++;
} }
} }
ns_ctx = ns_ctx->next;
} }
} while (unfinished_ns_ctx > 0); } while (unfinished_ns_ctx > 0);
@ -875,9 +864,6 @@ register_workers(void)
uint32_t i; uint32_t i;
struct worker_thread *worker; struct worker_thread *worker;
g_workers = NULL;
g_num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) { SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker)); worker = calloc(1, sizeof(*worker));
if (worker == NULL) { if (worker == NULL) {
@ -885,9 +871,9 @@ register_workers(void)
return -1; return -1;
} }
TAILQ_INIT(&worker->ns_ctx);
worker->lcore = i; worker->lcore = i;
worker->next = g_workers; TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_workers = worker;
g_num_workers++; g_num_workers++;
} }
@ -897,21 +883,18 @@ register_workers(void)
static void static void
unregister_workers(void) unregister_workers(void)
{ {
struct worker_thread *worker = g_workers; struct worker_thread *worker, *tmp_worker;
struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
/* Free namespace context and worker thread */ /* Free namespace context and worker thread */
while (worker) { TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
struct worker_thread *next_worker = worker->next; TAILQ_REMOVE(&g_workers, worker, link);
struct ns_worker_ctx *ns_ctx = worker->ns_ctx; TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
while (ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next;
free(ns_ctx); free(ns_ctx);
ns_ctx = next_ns_ctx;
} }
free(worker); free(worker);
worker = next_worker;
} }
} }
@ -975,22 +958,20 @@ register_controllers(void)
static void static void
unregister_controllers(void) unregister_controllers(void)
{ {
struct ctrlr_entry *entry = g_controllers; struct ctrlr_entry *entry, *tmp;
while (entry) {
struct ctrlr_entry *next = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
TAILQ_REMOVE(&g_controllers, entry, link);
spdk_nvme_detach(entry->ctrlr); spdk_nvme_detach(entry->ctrlr);
free(entry); free(entry);
entry = next;
} }
} }
static int static int
associate_workers_with_ns(void) associate_workers_with_ns(void)
{ {
struct ns_entry *entry = g_namespaces; struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = g_workers; struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx; struct ns_worker_ctx *ns_ctx;
int i, count; int i, count;
@ -1008,17 +989,17 @@ associate_workers_with_ns(void)
printf("Associating %s with lcore %d\n", entry->name, worker->lcore); printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
ns_ctx->entry = entry; ns_ctx->entry = entry;
ns_ctx->next = worker->ns_ctx;
worker->ns_ctx = ns_ctx;
worker = worker->next; TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker = TAILQ_NEXT(worker, link);
if (worker == NULL) { if (worker == NULL) {
worker = g_workers; worker = TAILQ_FIRST(&g_workers);
} }
entry = entry->next; entry = TAILQ_NEXT(entry, link);
if (entry == NULL) { if (entry == NULL) {
entry = g_namespaces; entry = TAILQ_FIRST(&g_namespaces);
} }
} }
@ -1040,8 +1021,7 @@ nvme_poll_ctrlrs(void *arg)
while (true) { while (true) {
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate); pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
entry = g_controllers; TAILQ_FOREACH(entry, &g_controllers, link) {
while (entry) {
rc = spdk_nvme_ctrlr_process_admin_completions(entry->ctrlr); rc = spdk_nvme_ctrlr_process_admin_completions(entry->ctrlr);
/* This controller has encountered a failure at the transport level. reset it. */ /* This controller has encountered a failure at the transport level. reset it. */
if (rc == -ENXIO) { if (rc == -ENXIO) {
@ -1071,7 +1051,6 @@ nvme_poll_ctrlrs(void *arg)
fprintf(stderr, "Controller properly reset.\n"); fprintf(stderr, "Controller properly reset.\n");
} }
} }
entry = entry->next;
} }
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
@ -1148,15 +1127,13 @@ int main(int argc, char **argv)
/* Launch all of the slave workers */ /* Launch all of the slave workers */
master_core = spdk_env_get_current_core(); master_core = spdk_env_get_current_core();
master_worker = NULL; master_worker = NULL;
worker = g_workers; TAILQ_FOREACH(worker, &g_workers, link) {
while (worker != NULL) {
if (worker->lcore != master_core) { if (worker->lcore != master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker); spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else { } else {
assert(master_worker == NULL); assert(master_worker == NULL);
master_worker = worker; master_worker = worker;
} }
worker = worker->next;
} }
assert(master_worker != NULL); assert(master_worker != NULL);