From e0280b1100bb1485bd56b9422f42bbcdc7ae7895 Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Mon, 14 Jan 2019 16:22:11 -0700
Subject: [PATCH] rdma: add drain argument to process_pending

This allows us to drain all of the pending requests from the qpairs
before we destroy them, preventing them from being picked up on
subsequent process_pending polls.

Change-Id: I149deff437b4c1764fabf542cdd25dd067a8713a
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/c/440428
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
Reviewed-by: Darek Stojaczyk
Reviewed-by: Jim Harris
Reviewed-by: Ben Walker
---
 lib/nvmf/rdma.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 64300916f..f5e092f6f 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -2029,7 +2029,7 @@ spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
 
 static void
 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
-				     struct spdk_nvmf_rdma_qpair *rqpair)
+				     struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
 {
 	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
@@ -2037,7 +2037,7 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport
 	/* We process I/O in the data transfer pending queue at the highest priority. */
 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING],
 			   state_link, req_tmp) {
-		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
+		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
 			break;
 		}
 	}
@@ -2045,7 +2045,7 @@
 
 	/* The second highest priority is I/O waiting on memory buffers. */
 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->ch->pending_data_buf_queue, link, req_tmp) {
-		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
+		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
 			break;
 		}
 	}
@@ -2687,7 +2687,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			count++;
 
 			/* Try to process other queued requests */
-			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
+			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
 			break;
 
 		case IBV_WC_RDMA_WRITE:
@@ -2696,7 +2696,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
 			/* Try to process other queued requests */
-			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
+			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
 			break;
 
 		case IBV_WC_RDMA_READ:
@@ -2709,7 +2709,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			spdk_nvmf_rdma_request_process(rtransport, rdma_req);
 
 			/* Try to process other queued requests */
-			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
+			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
 			break;
 
 		case IBV_WC_RECV:
@@ -2719,7 +2719,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
 
 			/* Try to process other queued requests */
-			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
+			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
 			break;
 
 		default:
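
Usage sketch (illustrative; not part of the patch above): with the new
drain flag, a qpair teardown path can force every queued request through
request processing instead of stopping at the first request that cannot
make forward progress, so nothing survives to be picked up by a later
process_pending poll. The wrapper below is hypothetical; only
spdk_nvmf_rdma_qpair_process_pending() and its argument types come from
this patch, and the destroy call stands in for whatever teardown rdma.c
actually performs.

	/* Hypothetical caller, assuming a destroy helper such as
	 * spdk_nvmf_rdma_qpair_destroy() exists in rdma.c. */
	static void
	nvmf_rdma_qpair_drain_and_destroy(struct spdk_nvmf_rdma_transport *rtransport,
					  struct spdk_nvmf_rdma_qpair *rqpair)
	{
		/* With drain == true, the early 'break' in both pending-queue
		 * loops is skipped, so every queued request is run through
		 * spdk_nvmf_rdma_request_process() one last time. */
		spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);

		/* Nothing is left on the pending queues, so the qpair can be
		 * torn down without a later poll touching its requests. */
		spdk_nvmf_rdma_qpair_destroy(rqpair);
	}

Note that the completion-queue pollers above all pass false: during
normal operation it still makes sense to stop at the first request that
cannot make progress, since it is likely blocked on a resource shortage
that requests behind it in the queue would hit as well.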