lib/nvmf: handle RDMA_CM_EVENT_ADDR_CHANGE

This allows features like transparent failover on
target RNICs.
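For reference, librdmacm delivers RDMA_CM_EVENT_ADDR_CHANGE on the event channel of a
cm_id whose bound address has migrated (for example, when an IP address fails over to
another port of the RNIC). A minimal, illustrative sketch of that event-channel pattern,
independent of SPDK (poll_cm_events and its use here are hypothetical, not part of this
patch):

#include <rdma/rdma_cma.h>
#include <stdio.h>

/* Drain the rdma_cm event channel and report address changes. */
static void
poll_cm_events(struct rdma_event_channel *channel)
{
	struct rdma_cm_event *event;

	while (rdma_get_cm_event(channel, &event) == 0) {
		if (event->event == RDMA_CM_EVENT_ADDR_CHANGE) {
			/* The address bound to event->id has moved; the listener must
			 * be torn down and re-created, which is what the handler added
			 * in this patch does for the nvmf RDMA transport. */
			printf("address change on cm_id %p\n", (void *)event->id);
		}
		rdma_ack_cm_event(event);
	}
}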

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Iab494ad3e9e4efea4db9cbb30bc18ea5b584f345
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478879
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: <jacek.kalwas@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Seth Howell 2019-12-26 18:04:39 -07:00 committed by Tomasz Zawadzki
parent 12ae64f863
commit 50cb6a04ac


@@ -2959,12 +2959,59 @@ static const char *CM_EVENT_STR[] = {
 };
 #endif /* DEBUG */
 
+static bool
+nvmf_rdma_handle_cm_event_addr_change(struct spdk_nvmf_transport *transport,
+		struct rdma_cm_event *event)
+{
+	struct spdk_nvme_transport_id trid;
+	struct spdk_nvmf_rdma_qpair *rqpair;
+	struct spdk_nvmf_rdma_poll_group *rgroup;
+	struct spdk_nvmf_rdma_poller *rpoller;
+	struct spdk_nvmf_rdma_port *port;
+	struct spdk_nvmf_rdma_transport *rtransport;
+	uint32_t ref, i;
+	bool event_acked = false;
+
+	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
+	TAILQ_FOREACH(port, &rtransport->ports, link) {
+		if (port->id == event->id) {
+			SPDK_ERRLOG("ADDR_CHANGE: IP %s:%s migrated\n", port->trid.traddr, port->trid.trsvcid);
+			rdma_ack_cm_event(event);
+			event_acked = true;
+			trid = port->trid;
+			ref = port->ref;
+			break;
+		}
+	}
+
+	if (event_acked) {
+		TAILQ_FOREACH(rgroup, &rtransport->poll_groups, link) {
+			TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
+				TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
+					if (rqpair->listen_id == port->id) {
+						spdk_nvmf_rdma_start_disconnect(rqpair);
+					}
+				}
+			}
+		}
+
+		for (i = 0; i < ref; i++) {
+			spdk_nvmf_rdma_stop_listen(transport, &trid);
+		}
+
+		while (ref > 0) {
+			spdk_nvmf_rdma_listen(transport, &trid);
+			ref--;
+		}
+	}
+
+	return event_acked;
+}
+
 static void
 spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
 {
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct rdma_cm_event *event;
 	int rc;
+	bool event_acked;
 
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
@@ -2973,6 +3020,7 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 	}
 
 	while (1) {
+		event_acked = false;
 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
 		if (rc == 0) {
 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
@@ -3019,7 +3067,7 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
				/* Multicast is not used */
				break;
			case RDMA_CM_EVENT_ADDR_CHANGE:
-				/* Not utilizing this event */
+				event_acked = nvmf_rdma_handle_cm_event_addr_change(transport, event);
				break;
			case RDMA_CM_EVENT_TIMEWAIT_EXIT:
				/* For now, do nothing. The target never re-uses queue pairs. */
@@ -3028,8 +3076,9 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
				SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
				break;
			}
-
-			rdma_ack_cm_event(event);
+			if (!event_acked) {
+				rdma_ack_cm_event(event);
+			}
		} else {
			if (errno != EAGAIN && errno != EWOULDBLOCK) {
				SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));