examples/nvme/perf: increase opts.num_io_queues when needed

By default we request 1024 I/O queues (opts.num_io_queues) per
controller. But high connection count use cases can need more,
e.g. -c 0xFF -P 512, which requires 8 * 512 = 4096. So
dynamically raise opts.num_io_queues based on the configured
worker and per-namespace queue counts.
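
As a worked example, here is a minimal standalone sketch of the
sizing logic this patch adds (values mirror the -c 0xFF -P 512
case above; the variable names are illustrative, and the real
code is the probe_cb() change below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_workers = 8;        /* -c 0xFF => 8 worker cores */
	uint32_t queues_per_ns = 512;    /* -P 512 */
	uint32_t num_io_queues = 1024;   /* default request per controller */

	/* Raise the request when workers * queues-per-ns exceeds it. */
	if (num_io_queues < num_workers * queues_per_ns) {
		num_io_queues = num_workers * queues_per_ns;
	}
	printf("num_io_queues = %u\n", num_io_queues); /* prints 4096 */
	return 0;
}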

Note: we have to change a couple of globals from int to
uint32_t to avoid signed vs. unsigned comparison warnings.
Do that in this patch rather than in a separate one.
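
For reference, a minimal sketch of the kind of warning the type
change avoids (hypothetical helper, not code from this patch;
gcc with -Wextra/-Wsign-compare flags mixed-sign comparisons):

#include <stdbool.h>
#include <stdint.h>

static uint32_t g_num_workers;  /* was: static int g_num_workers; */

bool
needs_more_queues(uint32_t num_io_queues)
{
	/* With 'int g_num_workers' this comparison mixes signed and
	 * unsigned operands and triggers -Wsign-compare; with both
	 * sides uint32_t it compiles cleanly. */
	return num_io_queues < g_num_workers;
}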

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Iba2d670c224a91e50377e622b154ce43eed94002
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17621
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
commit 0ca5304550
parent 982ae8f46c
Author:    Jim Harris
Date:      2023-04-18 00:39:27 +00:00
Committer: Konrad Sztyber

@@ -207,9 +207,9 @@ static bool g_vmd;
 static const char *g_workload_type;
 static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
 static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
-static int g_num_namespaces;
+static uint32_t g_num_namespaces;
 static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
-static int g_num_workers = 0;
+static uint32_t g_num_workers = 0;
 static bool g_use_every_core = false;
 static uint32_t g_main_core;
 static pthread_barrier_t g_worker_sync_barrier;
@@ -2843,6 +2843,9 @@ probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 	memcpy(opts->hostnqn, trid_entry->hostnqn, sizeof(opts->hostnqn));
 	opts->transport_tos = g_transport_tos;
+	if (opts->num_io_queues < g_num_workers * g_nr_io_queues_per_ns) {
+		opts->num_io_queues = g_num_workers * g_nr_io_queues_per_ns;
+	}
 	return true;
 }