examples/nvme/perf: increase opts.num_io_queues when needed
By default we specify 1024 max_io_queues per controller, but it's possible we need more for high connection count use cases (e.g. -c 0xFF -P 512, which is 8 * 512 = 4096 queue pairs). So dynamically configure opts.num_io_queues based on the corresponding values.

Note: we have to change a couple of globals from int to uint32_t to avoid signed vs. unsigned comparison warnings. Let's just do that in this patch instead of a separate one.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Iba2d670c224a91e50377e622b154ce43eed94002
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17621
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent a8d86cb313
commit f353506f1c
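For context, a minimal sketch (not part of the patch) showing where the 1024 figure comes from: spdk_nvme_ctrlr_get_default_ctrlr_opts() fills in the library defaults, including num_io_queues, that perf starts from before probe_cb() adjusts them.

/*
 * Illustrative only: print the default controller options.
 * spdk_nvme_ctrlr_get_default_ctrlr_opts() is SPDK's public API;
 * num_io_queues is the default this patch raises when -c/-P demand
 * more queue pairs.
 */
#include <stdio.h>
#include "spdk/nvme.h"

int
main(void)
{
	struct spdk_nvme_ctrlr_opts opts;

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	printf("default num_io_queues: %u\n", opts.num_io_queues);

	/* -c 0xFF selects 8 cores; with -P 512 that is 8 * 512 = 4096
	 * queue pairs, above the 1024 default. */
	return 0;
}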
@@ -207,9 +207,9 @@ static bool g_vmd;
 static const char *g_workload_type;
 static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
 static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
-static int g_num_namespaces;
+static uint32_t g_num_namespaces;
 static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
-static int g_num_workers = 0;
+static uint32_t g_num_workers = 0;
 static bool g_use_every_core = false;
 static uint32_t g_main_core;
 static pthread_barrier_t g_worker_sync_barrier;
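The int to uint32_t changes above are what the commit message's note refers to: the new comparison added in probe_cb() below mixes these globals with the unsigned opts->num_io_queues. An illustrative standalone example (not from the patch) of the warning that would otherwise fire:

/*
 * Illustrative only: comparing a signed int against an unsigned value
 * triggers -Wsign-compare (part of -Wextra), because the signed operand
 * is converted to unsigned before the comparison.
 */
#include <stdint.h>

int
would_warn(uint32_t num_io_queues, int num_workers, int queues_per_ns)
{
	/* gcc: "comparison of integer expressions of different signedness" */
	return num_io_queues < num_workers * queues_per_ns;
}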
@@ -2843,6 +2843,9 @@ probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 	memcpy(opts->hostnqn, trid_entry->hostnqn, sizeof(opts->hostnqn));
 
 	opts->transport_tos = g_transport_tos;
+	if (opts->num_io_queues < g_num_workers * g_nr_io_queues_per_ns) {
+		opts->num_io_queues = g_num_workers * g_nr_io_queues_per_ns;
+	}
 
 	return true;
 }
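For readers outside the perf code: probe_cb() is the callback handed to spdk_nvme_probe(), and it runs once per discovered controller before attach, which is why raising opts->num_io_queues there takes effect. A hedged sketch of that flow, with illustrative names (my_probe_cb, my_attach_cb) and the real transport/worker setup omitted:

/*
 * Hedged sketch of the surrounding flow: spdk_nvme_probe() invokes the
 * probe callback with a mutable spdk_nvme_ctrlr_opts for each discovered
 * controller, so changes made here apply before the controller attaches.
 */
#include <stdbool.h>
#include <stdint.h>
#include "spdk/nvme.h"

static bool
my_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	    struct spdk_nvme_ctrlr_opts *opts)
{
	uint32_t needed = 8 * 512;	/* e.g. -c 0xFF -P 512 */

	if (opts->num_io_queues < needed) {
		opts->num_io_queues = needed;
	}
	return true;	/* true means: attach to this controller */
}

static void
my_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	     struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	/* controller is now usable; perf allocates qpairs per worker here */
}

/* Usage: spdk_nvme_probe(NULL, NULL, my_probe_cb, my_attach_cb, NULL); */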