From e03aca3ce34c160f55bcb2192e484f7ba584c66f Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Thu, 2 Aug 2018 10:35:02 -0700
Subject: [PATCH] nvmf/rdma: don't delete queue pair until it is empty.

If a queue pair still has requests outstanding when it is asked to
close, destroying it immediately would pull resources out from under
those in-flight requests. Instead, mark the qpair disconnected and
defer the destroy until every request has returned to the free state;
spdk_nvmf_rdma_qpair_process_pending() retries the destroy once the
qpair has drained. The cur_rw_depth/cur_queue_depth helpers move up
in the file so the destroy path can use them.

Change-Id: I6ee2f9fd02292cc03db6ed16858a9d2cc9c4de05
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/421167
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
Reviewed-by: Changpeng Liu
Reviewed-by: Jim Harris
Reviewed-by: Ben Walker
---
 lib/nvmf/rdma.c | 44 +++++++++++++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 8e890f7ab..3ff51a88e 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -265,6 +265,8 @@ struct spdk_nvmf_rdma_qpair {
 	 */
 	struct ibv_qp_init_attr		ibv_init_attr;
 	struct ibv_qp_attr		ibv_attr;
+
+	bool				qpair_disconnected;
 };
 
 struct spdk_nvmf_rdma_poller {
@@ -483,9 +485,28 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
 	}
 }
 
+static int
+spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
+	       rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
+}
+
+static int
+spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+	return rqpair->max_queue_depth -
+	       rqpair->state_cntr[RDMA_REQUEST_STATE_FREE];
+}
+
 static void
 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 {
+	if (spdk_nvmf_rdma_cur_queue_depth(rqpair)) {
+		rqpair->qpair_disconnected = true;
+		return;
+	}
+
 	if (rqpair->poller) {
 		TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
 	}
@@ -1177,20 +1198,6 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 	return -1;
 }
 
-static int
-spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
-	       rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
-}
-
-static int
-spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	return rqpair->max_queue_depth -
-	       rqpair->state_cntr[RDMA_REQUEST_STATE_FREE];
-}
-
 static bool
 spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			       struct spdk_nvmf_rdma_request *rdma_req)
@@ -1889,6 +1896,11 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 		}
 	}
 
+	if (rqpair->qpair_disconnected) {
+		spdk_nvmf_rdma_qpair_destroy(rqpair);
+		return;
+	}
+
 	/* Do not process newly received commands if qp is in ERROR state,
 	 * wait till the recovery is complete.
 	 */
@@ -2368,7 +2380,9 @@ spdk_nvmf_rdma_request_complete(struct spdk_nvmf_request *req)
 static void
 spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
 {
-	spdk_nvmf_rdma_qpair_destroy(SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair));
+	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+
+	spdk_nvmf_rdma_qpair_destroy(rqpair);
 }
 
 static struct spdk_nvmf_rdma_request *
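
For readers outside the SPDK tree, below is a minimal standalone sketch of
the deferred-destroy pattern this patch introduces. It builds and runs on
its own; every name in it (struct conn, conn_destroy(), conn_request_done())
is a hypothetical stand-in, not SPDK API: conn_destroy() plays the role of
spdk_nvmf_rdma_qpair_destroy(), and conn_request_done() stands in for the
completion path that rechecks the qpair_disconnected flag, analogous to the
check the patch adds in spdk_nvmf_rdma_qpair_process_pending().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for spdk_nvmf_rdma_qpair: a connection that
 * may be asked to close while requests are still in flight. */
struct conn {
	int  outstanding;    /* requests not yet returned to the free state */
	bool disconnected;   /* destroy requested while requests were in flight */
};

static void
conn_free(struct conn *c)
{
	printf("connection freed\n");
	free(c);
}

/* Same shape as the patched spdk_nvmf_rdma_qpair_destroy(): if any
 * request is still outstanding, only mark the connection and return;
 * the actual free is deferred. */
static void
conn_destroy(struct conn *c)
{
	if (c->outstanding > 0) {
		c->disconnected = true;
		return;
	}
	conn_free(c);
}

/* Stand-in for the completion path: once the last request completes,
 * the deferred destroy is retried. */
static void
conn_request_done(struct conn *c)
{
	c->outstanding--;
	if (c->disconnected && c->outstanding == 0) {
		conn_destroy(c);
	}
}

int
main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (c == NULL) {
		return 1;
	}
	c->outstanding = 2;    /* two requests in flight */
	conn_destroy(c);       /* close requested: deferred, not freed */
	conn_request_done(c);  /* first completion: still one in flight */
	conn_request_done(c);  /* last completion: prints "connection freed" */
	return 0;
}

The property this buys, in the sketch as in the patch, is that destroy is
safe to request at any time: it either frees the object immediately or arms
a flag, and the final completion performs the actual free, so no in-flight
request is ever left pointing at freed memory.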