nvmf/rdma: Handle several ibv events in a row

Currently the rdma acceptor handles only one ibv event per poll.
Taking into account the default acceptor poll rate (10ms), it can
take a long time to handle e.g. LAST_WQE_REACHED events when we
close a huge number of qpairs at the same time.
This patch allows to handle up to 32 ibv events per acceptor poll.

Change-Id: Ic2884dfc5b54c6aec0655aaa547b491a9934a386
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3821
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Alexey Marchuk 2020-08-17 17:39:54 +03:00 committed by Tomasz Zawadzki
parent 8e43a261ea
commit 58f43df1f5

View File

@ -3050,7 +3050,7 @@ nvmf_rdma_send_qpair_async_event(struct spdk_nvmf_rdma_qpair *rqpair,
return rc; return rc;
} }
static void static int
nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device) nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
{ {
int rc; int rc;
@ -3060,9 +3060,8 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
rc = ibv_get_async_event(device->context, &event); rc = ibv_get_async_event(device->context, &event);
if (rc) { if (rc) {
SPDK_ERRLOG("Failed to get async_event (%d): %s\n", /* In non-blocking mode -1 means there are no events available */
errno, spdk_strerror(errno)); return rc;
return;
} }
switch (event.event_type) { switch (event.event_type) {
@ -3125,6 +3124,24 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
break; break;
} }
ibv_ack_async_event(&event); ibv_ack_async_event(&event);
return 0;
}
static void
nvmf_process_ib_events(struct spdk_nvmf_rdma_device *device, uint32_t max_events)
{
int rc = 0;
uint32_t i = 0;
for (i = 0; i < max_events; i++) {
rc = nvmf_process_ib_event(device);
if (rc) {
break;
}
}
SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Device %s: %u events processed\n", device->context->device->name, i);
} }
static uint32_t static uint32_t
@ -3155,7 +3172,7 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
/* Second and subsequent poll descriptors are IB async events */ /* Second and subsequent poll descriptors are IB async events */
TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) { TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
if (rtransport->poll_fds[i++].revents & POLLIN) { if (rtransport->poll_fds[i++].revents & POLLIN) {
nvmf_process_ib_event(device); nvmf_process_ib_events(device, 32);
nfds--; nfds--;
} }
} }