From fa01f99692297915d44d3c57b7a99cd61c7aac15 Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Thu, 6 Aug 2020 09:57:36 -0700
Subject: [PATCH] nvmf/rdma: disconnect qpair from ibv_event ctx

This call can be made directly now that spdk_nvmf_qpair_disconnect is
thread safe. It's actually better that we do it this way, because the
qp destruct call is guaranteed to block until the ib events associated
with it are acknowledged. This means that by processing the disconnect
before we ack the event, we will have valid memory to do the atomic
checks.

Signed-off-by: Seth Howell
Change-Id: If6882b7dc568fe4c35f4a35375769634326e9d76
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3681
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Changpeng Liu
Reviewed-by: Aleksey Marchuk
---
 lib/nvmf/rdma.c | 26 +++-----------------------
 1 file changed, 3 insertions(+), 23 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 54add05c5..a17d68b64 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -3000,13 +3000,6 @@ nvmf_process_cm_event(struct spdk_nvmf_transport *transport)
 	}
 }
 
-static void
-nvmf_rdma_handle_qp_fatal(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	nvmf_rdma_update_ibv_state(rqpair);
-	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
-}
-
 static void
 nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
 {
@@ -3014,12 +3007,6 @@ nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
 	nvmf_rdma_destroy_drained_qpair(rqpair);
 }
 
-static void
-nvmf_rdma_handle_sq_drained(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
-}
-
 static void
 nvmf_rdma_qpair_process_ibv_event(void *ctx)
 {
@@ -3092,11 +3079,8 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
 				  (uintptr_t)rqpair->cm_id, event.event_type);
-		rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_qp_fatal);
-		if (rc) {
-			SPDK_WARNLOG("Failed to send QP_FATAL event. rqpair %p, err %d\n", rqpair, rc);
-			nvmf_rdma_handle_qp_fatal(rqpair);
-		}
+		nvmf_rdma_update_ibv_state(rqpair);
+		spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
 		break;
 	case IBV_EVENT_QP_LAST_WQE_REACHED:
 		/* This event only occurs for shared receive queues. */
@@ -3116,11 +3100,7 @@
 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
 				  (uintptr_t)rqpair->cm_id, event.event_type);
 		if (nvmf_rdma_update_ibv_state(rqpair) == IBV_QPS_ERR) {
-			rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_sq_drained);
-			if (rc) {
-				SPDK_WARNLOG("Failed to send SQ_DRAINED event. rqpair %p, err %d\n", rqpair, rc);
-				nvmf_rdma_handle_sq_drained(rqpair);
-			}
+			spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
 		}
 		break;
 	case IBV_EVENT_QP_REQ_ERR:
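
Note (not part of the patch): a minimal libibverbs sketch of the guarantee the
commit message relies on. Per the ibv_get_async_event() contract, destroying a
QP blocks until every async event retrieved for it has been acknowledged with
ibv_ack_async_event(), so work done before the ack runs while the QP (and any
qpair hanging off its context) is still valid memory. The poll_async_events()
helper and the logging are illustrative, not SPDK code.

#include <stdio.h>
#include <infiniband/verbs.h>

static void
poll_async_events(struct ibv_context *verbs)
{
	struct ibv_async_event event;

	/* Blocks until the device reports an asynchronous event. */
	if (ibv_get_async_event(verbs, &event)) {
		return;
	}

	if (event.event_type == IBV_EVENT_QP_FATAL) {
		/* The QP referenced by this event cannot finish being
		 * destroyed yet: an ibv_destroy_qp() on another thread
		 * blocks until the ack below. It is therefore safe to
		 * inspect the QP and start the disconnect here. */
		fprintf(stderr, "fatal event on QP %u\n",
			event.element.qp->qp_num);
	}

	/* Acknowledge last; only now may a pending destroy complete. */
	ibv_ack_async_event(&event);
}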