examples/nvme/perf: connect io qpairs asynchronously

This significantly speeds up testing of high connection count
workloads (e.g. -P 64), especially with TCP. We already set
async_mode=true unconditionally in the bdev/nvme module, so
there is no reason not to do the same in perf.
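
For reference, this is the qpair-creation pattern being enabled. A minimal
sketch, assuming a connected controller handle; the helper name is
illustrative and not a symbol from perf.c:

#include "spdk/nvme.h"

/* Sketch: allocate an I/O qpair that will connect asynchronously.
 * create_only defers the connect; async_mode makes the connect non-blocking.
 */
static struct spdk_nvme_qpair *
alloc_async_io_qpair(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;  /* caller calls spdk_nvme_ctrlr_connect_io_qpair() later */
	opts.async_mode = true;   /* connect returns before the qpair is fully connected */

	return spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
}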

After allocating all of the IO qpairs, busy poll the
poll group, using the new spdk_nvme_poll_group_all_connected()
API to ensure the qpairs are all connected before proceeding
with I/O.
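
In isolation, that busy-poll step looks roughly like the sketch below,
assuming the qpairs were already added to the poll group and their connects
were initiated; wait_for_connect() and disconnect_cb are illustrative names,
not perf.c symbols:

#include <errno.h>
#include "spdk/env.h"
#include "spdk/nvme.h"

/* Sketch: poll the group until every qpair reports connected, with a
 * 10 second cap. Returns 0 on success, -EAGAIN on timeout, or another
 * negative errno if a connection failed.
 */
static int
wait_for_connect(struct spdk_nvme_poll_group *group,
		 spdk_nvme_disconnected_qpair_cb disconnect_cb)
{
	uint64_t timeout_tsc = spdk_get_ticks() + 10 * spdk_get_ticks_hz();
	int rc = -EAGAIN;

	while (spdk_get_ticks() < timeout_tsc && rc == -EAGAIN) {
		/* 0 means no per-qpair limit on completions processed */
		spdk_nvme_poll_group_process_completions(group, 0, disconnect_cb);
		rc = spdk_nvme_poll_group_all_connected(group);
	}

	return rc;
}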

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: If0c3c944cd5f3d87170a5bbf7d766ac1a4dcef7c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17578
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>

Author: Jim Harris <james.r.harris@intel.com>, 2023-04-17 23:03:11 +00:00
Committed-by: David Ko
Parent: 31f126b46c
Commit: 5bf8031230

--- a/examples/nvme/perf/perf.c
+++ b/examples/nvme/perf/perf.c
@@ -983,7 +983,8 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
 	struct ns_entry *entry = ns_ctx->entry;
 	struct spdk_nvme_poll_group *group;
 	struct spdk_nvme_qpair *qpair;
-	int i;
+	uint64_t poll_timeout_tsc;
+	int i, rc;
 
 	ns_ctx->u.nvme.num_active_qpairs = g_nr_io_queues_per_ns;
 	ns_ctx->u.nvme.num_all_qpairs = g_nr_io_queues_per_ns + g_nr_unused_io_queues;
@@ -998,6 +999,7 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
 	}
 	opts.delay_cmd_submit = true;
 	opts.create_only = true;
+	opts.async_mode = true;
 
 	ns_ctx->u.nvme.group = spdk_nvme_poll_group_create(NULL, NULL);
 	if (ns_ctx->u.nvme.group == NULL) {
@@ -1027,7 +1029,22 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
 		}
 	}
 
-	return 0;
+	/* Busy poll here until all qpairs are connected - this ensures once we start
+	 * I/O we aren't still waiting for some qpairs to connect. Limit the poll to
+	 * 10 seconds though.
+	 */
+	poll_timeout_tsc = spdk_get_ticks() + 10 * spdk_get_ticks_hz();
+	rc = -EAGAIN;
+	while (spdk_get_ticks() < poll_timeout_tsc && rc == -EAGAIN) {
+		spdk_nvme_poll_group_process_completions(group, 0, perf_disconnect_cb);
+		rc = spdk_nvme_poll_group_all_connected(group);
+		if (rc == 0) {
+			return 0;
+		}
+	}
+
+	/* If we reach here, it means we either timed out, or some connection failed. */
+	assert(spdk_get_ticks() > poll_timeout_tsc || rc == -EIO);
 
 qpair_failed:
 	for (; i > 0; --i) {