diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 024f7c8b1..f64afefe8 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -368,6 +368,8 @@ struct spdk_nvmf_rdma_qpair {
 	 * that we only initialize one of these paths. */
 	bool disconnect_started;
 
+	/* Lets us know that we have received the last_wqe event. */
+	bool last_wqe_reached;
 };
 
 struct spdk_nvmf_rdma_poller {
@@ -2332,11 +2334,21 @@ static void nvmf_rdma_destroy_drained_qpair(void *ctx)
 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
 			struct spdk_nvmf_rdma_transport, transport);
 
-	if (rqpair->current_send_depth == 0 && rqpair->current_recv_depth == rqpair->max_queue_depth) {
-		/* The qpair has been drained. Free the resources. */
-		spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
+	/* In non SRQ path, we will reach rqpair->max_queue_depth. In SRQ path, we will get the last_wqe event. */
+	if (rqpair->current_send_depth != 0) {
+		return;
 	}
+
+	if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
+		return;
+	}
+
+	if (rqpair->srq != NULL && rqpair->last_wqe_reached == false) {
+		return;
+	}
+
+	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
+	spdk_nvmf_rdma_qpair_destroy(rqpair);
 }
 
 
@@ -2473,7 +2485,7 @@ static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
 	int rc;
-	struct spdk_nvmf_rdma_qpair *rqpair;
+	struct spdk_nvmf_rdma_qpair *rqpair = NULL;
 	struct ibv_async_event event;
 	enum ibv_qp_state state;
 
@@ -2497,7 +2509,17 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		spdk_nvmf_rdma_start_disconnect(rqpair);
 		break;
 	case IBV_EVENT_QP_LAST_WQE_REACHED:
-		/* This event only occurs for shared receive queues, which are not currently supported. */
+		/* This event only occurs for shared receive queues. */
+		rqpair = event.element.qp->qp_context;
+		rqpair->last_wqe_reached = true;
+
+		/* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
+		if (rqpair->qpair.group) {
+			spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_destroy_drained_qpair, rqpair);
+		} else {
+			SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
+		}
+
 		break;
	case IBV_EVENT_SQ_DRAINED:
 		/* This event occurs frequently in both error and non-error states.