diff --git a/examples/nvme/perf/perf.c b/examples/nvme/perf/perf.c
index dd9a9014e..4d18be7e8 100644
--- a/examples/nvme/perf/perf.c
+++ b/examples/nvme/perf/perf.c
@@ -77,6 +77,7 @@ struct ns_entry {
 
 	struct ns_entry		*next;
 	uint32_t		io_size_blocks;
+	uint32_t		num_io_requests;
 	uint64_t		size_in_ios;
 	uint32_t		io_flags;
 	uint16_t		apptag_mask;
@@ -233,10 +234,9 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
 	if ((g_queue_depth * entries) > opts.io_queue_size) {
 		printf("controller IO queue size %u less than required\n",
 		       opts.io_queue_size);
-		printf("configure an IO queue depth and IO size such that "
-		       "the product is less than or equal to %u\n", opts.io_queue_size);
+		printf("Consider using a lower queue depth or smaller IO size because "
+		       "IO requests may be queued at the NVMe driver.\n");
 		g_warn = true;
-		return;
 	}
 
 	entry = calloc(1, sizeof(struct ns_entry));
@@ -248,6 +248,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
 	entry->type = ENTRY_TYPE_NVME_NS;
 	entry->u.nvme.ctrlr = ctrlr;
 	entry->u.nvme.ns = ns;
+	entry->num_io_requests = entries;
 
 	entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
 			     g_io_size_bytes;
@@ -805,7 +806,15 @@ init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
 	 * TODO: If a controller has multiple namespaces, they could all use the same queue.
 	 *  For now, give each namespace/thread combination its own queue.
 	 */
-	ns_ctx->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->u.nvme.ctrlr, NULL, 0);
+	struct spdk_nvme_io_qpair_opts opts;
+
+	spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_ctx->entry->u.nvme.ctrlr, &opts, sizeof(opts));
+	if (opts.io_queue_requests < ns_ctx->entry->num_io_requests) {
+		opts.io_queue_requests = ns_ctx->entry->num_io_requests;
+	}
+
+	ns_ctx->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->u.nvme.ctrlr, &opts,
+			sizeof(opts));
 	if (!ns_ctx->u.nvme.qpair) {
 		printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
 		return -1;
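
For context, the change above replaces the default qpair allocation (NULL, 0) with an explicit struct spdk_nvme_io_qpair_opts so that io_queue_requests can be raised to cover the worst-case number of outstanding requests per namespace. A minimal standalone sketch of the same pattern follows; the helper name alloc_qpair_with_requests and its parameters are illustrative only (not part of this patch), and an already-attached controller is assumed.

#include "spdk/nvme.h"

/*
 * Illustrative helper (not part of the patch): allocate an I/O qpair whose
 * request pool holds at least num_io_requests entries, starting from the
 * controller's default qpair options.
 */
static struct spdk_nvme_qpair *
alloc_qpair_with_requests(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_requests)
{
	struct spdk_nvme_io_qpair_opts opts;

	/* Fetch the controller's default I/O qpair options. */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));

	/* Grow the request pool only if the workload needs more than the default. */
	if (opts.io_queue_requests < num_io_requests) {
		opts.io_queue_requests = num_io_requests;
	}

	/* Pass the customized options (and their size) when allocating the qpair. */
	return spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
}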