From 50cb6a04acf3f77863cc7fe7753dabd79beaab57 Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Thu, 26 Dec 2019 18:04:39 -0700
Subject: [PATCH] lib/nvmf: handle RDMA_CM_EVENT_ADDR_CHANGE

This allows features like transparent failover on target RNICs.

Signed-off-by: Seth Howell
Change-Id: Iab494ad3e9e4efea4db9cbb30bc18ea5b584f345
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478879
Tested-by: SPDK CI Jenkins
Community-CI: SPDK CI Jenkins
Reviewed-by: Alexey Marchuk
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Ben Walker
---
 lib/nvmf/rdma.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index accf369a4..07ee9d9bc 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -2959,12 +2959,59 @@ static const char *CM_EVENT_STR[] = {
 };
 #endif /* DEBUG */
 
+static bool
+nvmf_rdma_handle_cm_event_addr_change(struct spdk_nvmf_transport *transport,
+				      struct rdma_cm_event *event)
+{
+	struct spdk_nvme_transport_id trid;
+	struct spdk_nvmf_rdma_qpair *rqpair;
+	struct spdk_nvmf_rdma_poll_group *rgroup;
+	struct spdk_nvmf_rdma_poller *rpoller;
+	struct spdk_nvmf_rdma_port *port;
+	struct spdk_nvmf_rdma_transport *rtransport;
+	uint32_t ref, i;
+	bool event_acked = false;
+
+	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
+	TAILQ_FOREACH(port, &rtransport->ports, link) {
+		if (port->id == event->id) {
+			SPDK_ERRLOG("ADDR_CHANGE: IP %s:%s migrated\n", port->trid.traddr, port->trid.trsvcid);
+			rdma_ack_cm_event(event);
+			event_acked = true;
+			trid = port->trid;
+			ref = port->ref;
+			break;
+		}
+	}
+	if (event_acked) {
+		TAILQ_FOREACH(rgroup, &rtransport->poll_groups, link) {
+			TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
+				TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
+					if (rqpair->listen_id == port->id) {
+						spdk_nvmf_rdma_start_disconnect(rqpair);
+					}
+				}
+			}
+		}
+
+		for (i = 0; i < ref; i++) {
+			spdk_nvmf_rdma_stop_listen(transport, &trid);
+		}
+		while (ref > 0) {
+			spdk_nvmf_rdma_listen(transport, &trid);
+			ref--;
+		}
+	}
+	return event_acked;
+}
+
 static void
 spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
 {
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct rdma_cm_event *event;
 	int rc;
+	bool event_acked;
 
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
 
@@ -2973,6 +3020,7 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 	}
 
 	while (1) {
+		event_acked = false;
 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
 		if (rc == 0) {
 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
@@ -3019,7 +3067,7 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 				/* Multicast is not used */
 				break;
 			case RDMA_CM_EVENT_ADDR_CHANGE:
-				/* Not utilizing this event */
+				event_acked = nvmf_rdma_handle_cm_event_addr_change(transport, event);
 				break;
 			case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 				/* For now, do nothing. The target never re-uses queue pairs. */
@@ -3028,8 +3076,9 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 				SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
 				break;
 			}
-
-			rdma_ack_cm_event(event);
+			if (!event_acked) {
+				rdma_ack_cm_event(event);
+			}
 		} else {
 			if (errno != EAGAIN && errno != EWOULDBLOCK) {
 				SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
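
The event loop above leans on librdmacm's ack-once contract: every event returned
by rdma_get_cm_event() must be acknowledged exactly once with rdma_ack_cm_event(),
and destroying a cm_id blocks until events pending on it have been acked. That is
why nvmf_rdma_handle_cm_event_addr_change() acks the ADDR_CHANGE event itself,
before spdk_nvmf_rdma_stop_listen() tears down the listener id, and why the new
event_acked flag suppresses the generic ack at the bottom of the loop. The sketch
below is a minimal standalone illustration of that discipline against plain
librdmacm, not SPDK code; handle_addr_change() is a hypothetical stand-in for the
handler added by this patch.

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <rdma/rdma_cma.h>

/* Hypothetical stand-in for nvmf_rdma_handle_cm_event_addr_change(): it acks
 * the event itself (returning true), so the caller must not ack it again. */
static bool
handle_addr_change(struct rdma_cm_event *event)
{
	fprintf(stderr, "address changed on id %p\n", (void *)event->id);
	rdma_ack_cm_event(event);
	return true;
}

static void
drain_cm_events(struct rdma_event_channel *channel)
{
	struct rdma_cm_event *event;
	bool event_acked;

	/* Same shape as the loop in spdk_nvmf_process_cm_event(). */
	while (rdma_get_cm_event(channel, &event) == 0) {
		event_acked = false;
		switch (event->event) {
		case RDMA_CM_EVENT_ADDR_CHANGE:
			event_acked = handle_addr_change(event);
			break;
		default:
			fprintf(stderr, "event: %s\n", rdma_event_str(event->event));
			break;
		}
		/* Ack exactly once: skip if the handler already did it. */
		if (!event_acked) {
			rdma_ack_cm_event(event);
		}
	}
	if (errno != EAGAIN && errno != EWOULDBLOCK) {
		perror("rdma_get_cm_event");
	}
}

int
main(void)
{
	struct rdma_event_channel *channel;
	int flags;

	channel = rdma_create_event_channel();
	if (channel == NULL) {
		perror("rdma_create_event_channel");
		return 1;
	}
	/* Nonblocking, as SPDK configures its acceptor channel, so the
	 * drain loop returns EAGAIN instead of sleeping. */
	flags = fcntl(channel->fd, F_GETFL);
	fcntl(channel->fd, F_SETFL, flags | O_NONBLOCK);

	drain_cm_events(channel);
	rdma_destroy_event_channel(channel);
	return 0;
}

Compile with -lrdmacm; on an idle channel the loop simply returns on EAGAIN,
mirroring how SPDK polls its nonblocking acceptor channel.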
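
The for/while pair over ref mirrors how listeners are reference counted:
spdk_nvmf_rdma_listen() bumps port->ref when asked to listen on a trid that is
already bound (e.g. two subsystems sharing one listen address), and
spdk_nvmf_rdma_stop_listen() only destroys the underlying cm_id once the count
drops to zero. The handler therefore snapshots ref, fully unwinds the port, and
rebuilds it the same number of times so the new address binding takes effect
without disturbing the count. A toy model of that refcounting, with purely
illustrative names (toy_port, toy_listen, toy_stop_listen are not SPDK API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the listener refcounting unwound by the handler. */
struct toy_port {
	char traddr[64];
	uint32_t ref;	/* how many listen() calls share this port */
	bool bound;	/* stands in for the listener rdma_cm_id */
};

static void
toy_listen(struct toy_port *port, const char *traddr)
{
	if (port->ref == 0) {
		/* First listener: bind a fresh cm_id at the current address. */
		snprintf(port->traddr, sizeof(port->traddr), "%s", traddr);
		port->bound = true;
	}
	port->ref++;
}

static void
toy_stop_listen(struct toy_port *port)
{
	assert(port->ref > 0);
	if (--port->ref == 0) {
		/* Last listener gone: destroy the cm_id. */
		port->bound = false;
	}
}

int
main(void)
{
	struct toy_port port = {0};
	uint32_t ref, i;

	toy_listen(&port, "192.168.1.10");
	toy_listen(&port, "192.168.1.10");	/* second user of the trid */

	/* ADDR_CHANGE handling: snapshot the count, unwind, rebuild. */
	ref = port.ref;
	for (i = 0; i < ref; i++) {
		toy_stop_listen(&port);
	}
	for (i = 0; i < ref; i++) {
		toy_listen(&port, "192.168.1.10");
	}

	assert(port.ref == 2 && port.bound);
	printf("port %s rebuilt with ref=%u\n", port.traddr, port.ref);
	return 0;
}

Connections accepted through the stale binding cannot survive the move, which is
why the patch walks every poller and calls spdk_nvmf_rdma_start_disconnect() on
qpairs whose listen_id matches the affected port before rebuilding the listeners.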