nvmf/rdma: monitor asynchronous events

The NVMf connection acceptor poller is changed to also check for
asynchronous events from the RDMA devices.

RDMA async events are polled together with RDMA CM events; the file
descriptors are combined into a poll fd array and processed in a single
poll syscall.
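
To illustrate the pattern outside of SPDK, the sketch below (not part
of this patch) combines several descriptors into one pollfd array and
checks them all with a single zero-timeout poll(). Plain pipes stand
in for the RDMA CM event channel fd and the per-device async fds:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NUM_DEVICES 2

    int main(void)
    {
            struct pollfd fds[1 + NUM_DEVICES];
            int cm_pipe[2], dev_pipe[NUM_DEVICES][2];
            int i, nfds;

            /* Slot 0 stands in for the RDMA CM event channel fd. */
            if (pipe(cm_pipe) != 0) {
                    return 1;
            }
            fds[0].fd = cm_pipe[0];
            fds[0].events = POLLIN;

            /* One slot per device stands in for each context's async_fd. */
            for (i = 0; i < NUM_DEVICES; i++) {
                    if (pipe(dev_pipe[i]) != 0) {
                            return 1;
                    }
                    fds[1 + i].fd = dev_pipe[i][0];
                    fds[1 + i].events = POLLIN;
            }

            /* Simulate an async event arriving on device 1. */
            write(dev_pipe[1][1], "x", 1);

            /* Timeout 0: an acceptor poller must not block. */
            nfds = poll(fds, 1 + NUM_DEVICES, 0);
            if (nfds <= 0) {
                    return 0;
            }

            if (fds[0].revents & POLLIN) {
                    printf("CM event ready\n");
                    nfds--;
            }
            for (i = 0; i < NUM_DEVICES && nfds > 0; i++) {
                    if (fds[1 + i].revents & POLLIN) {
                            printf("async event ready on device %d\n", i);
                            nfds--;
                    }
            }
            return 0;
    }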

The error handler is an empty placeholder in this patch; it just
prints the type of event read from the IB device context.
Implementing actual event handling is left for later patches.
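
For reference, a minimal standalone sketch of what such a handler does
with libibverbs is shown below (not part of this patch; the helper name
drain_one_async_event and the device-opening main() are illustrative
only and assume a host with at least one RDMA-capable device):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    static void drain_one_async_event(struct ibv_context *ctx)
    {
            struct ibv_async_event event;

            /* Blocks until an event arrives unless ctx->async_fd has been
             * made O_NONBLOCK, as this patch does for each device.
             */
            if (ibv_get_async_event(ctx, &event)) {
                    perror("ibv_get_async_event");
                    return;
            }
            printf("Async event: %s\n", ibv_event_type_str(event.event_type));
            /* Every event must be acknowledged, or later resource
             * destruction can hang waiting on outstanding events.
             */
            ibv_ack_async_event(&event);
    }

    int main(void)
    {
            int num;
            struct ibv_device **list = ibv_get_device_list(&num);

            if (list == NULL || num == 0) {
                    fprintf(stderr, "no RDMA devices found\n");
                    return 1;
            }
            struct ibv_context *ctx = ibv_open_device(list[0]);
            if (ctx != NULL) {
                    drain_one_async_event(ctx);
                    ibv_close_device(ctx);
            }
            ibv_free_device_list(list);
            return 0;
    }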

Signed-off-by: Philipp Skadorov <philipp.skadorov@wdc.com>
Change-Id: Ib167990651b585090aceef1404a88d431a910226
Reviewed-on: https://review.gerrithub.io/412540
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

@@ -264,6 +264,10 @@ struct spdk_nvmf_rdma_transport {
 	uint32_t			io_unit_size;
 	uint32_t			in_capsule_data_size;
 
+	/* fields used to poll RDMA/IB events */
+	nfds_t				npoll_fds;
+	struct pollfd			*poll_fds;
+
 	TAILQ_HEAD(, spdk_nvmf_rdma_device)	devices;
 	TAILQ_HEAD(, spdk_nvmf_rdma_port)	ports;
 };
@@ -1240,6 +1244,14 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 			break;
 		}
 
+		/* set up device context async ev fd as NON_BLOCKING */
+		flag = fcntl(device->context->async_fd, F_GETFL);
+		rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK);
+		if (rc < 0) {
+			SPDK_ERRLOG("Failed to set context async fd to NONBLOCK.\n");
+			free(device);
+			break;
+		}
 
 		device->pd = NULL;
 		device->map = NULL;
@@ -1258,6 +1270,20 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 		free(rtransport);
 		rdma_free_devices(contexts);
 		return NULL;
+	} else {
+		/* Set up poll descriptor array to monitor events from RDMA and IB
+		 * in a single poll syscall
+		 */
+		rtransport->npoll_fds = i + 1;
+		i = 0;
+		rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
+
+		rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
+		rtransport->poll_fds[i++].events = POLLIN;
+
+		TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
+			rtransport->poll_fds[i].fd = device->context->async_fd;
+			rtransport->poll_fds[i++].events = POLLIN;
+		}
 	}
 
 	rdma_free_devices(contexts);
@@ -1280,6 +1306,10 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 		free(port);
 	}
 
+	if (rtransport->poll_fds != NULL) {
+		free(rtransport->poll_fds);
+	}
+
 	if (rtransport->event_channel != NULL) {
 		rdma_destroy_event_channel(rtransport->event_channel);
 	}
@@ -1473,7 +1503,7 @@ spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
 }
 
 static void
-spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
+spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
 {
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct rdma_cm_event		*event;
@@ -1550,6 +1580,60 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
 	}
 }
 
+static void
+spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
+{
+	int rc;
+	struct ibv_async_event event;
+
+	rc = ibv_get_async_event(device->context, &event);
+	if (rc) {
+		SPDK_ERRLOG("Failed to get async_event (%d): %s\n",
+			    errno, spdk_strerror(errno));
+		return;
+	}
+
+	SPDK_NOTICELOG("Async event: %s\n",
+		       ibv_event_type_str(event.event_type));
+	ibv_ack_async_event(&event);
+}
+
+static void
+spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
+{
+	int	nfds, i = 0;
+	struct spdk_nvmf_rdma_transport *rtransport;
+	struct spdk_nvmf_rdma_device *device, *tmp;
+
+	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
+	nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);
+
+	if (nfds <= 0) {
+		return;
+	}
+
+	/* The first poll descriptor is RDMA CM event */
+	if (rtransport->poll_fds[i++].revents & POLLIN) {
+		spdk_nvmf_process_cm_event(transport, cb_fn);
+		nfds--;
+	}
+
+	if (nfds == 0) {
+		return;
+	}
+
+	/* Second and subsequent poll descriptors are IB async events */
+	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
+		if (rtransport->poll_fds[i++].revents & POLLIN) {
+			spdk_nvmf_process_ib_event(device);
+			nfds--;
+		}
+	}
+
+	/* check all flagged fd's have been served */
+	assert(nfds == 0);
+}
+
 static void
 spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
 			struct spdk_nvme_transport_id *trid,