From 12444f400dcbd474308cd1b54325537befecdaeb Mon Sep 17 00:00:00 2001
From: Ben Walker
Date: Wed, 1 Aug 2018 16:05:40 -0700
Subject: [PATCH] nvmf/rdma: Only abort all requests when first entering
 error state

There is no need to keep attempting to abort all requests later on;
there won't be any in these other states.

Change-Id: I7b12e10b87e0d0bb4a74fdf67fb278b443e70e8a
Signed-off-by: Ben Walker
Reviewed-on: https://review.gerrithub.io/421042
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
Reviewed-by: Jim Harris
Reviewed-by: Changpeng Liu
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Seth Howell
---
 lib/nvmf/rdma.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 69cd91ab4..0505a937e 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -2004,27 +2004,8 @@ spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
 static void
 spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 {
-	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
-
 	SPDK_NOTICELOG("IBV QP#%u drained\n", rqpair->qpair.qid);
 
-	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
-		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
-	}
-
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
-
-	/* First wipe the requests waiting for buffer from the global list */
-	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
-		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
-	}
-	/* Then drain the requests through the rdma queue */
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
-
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
-
 	if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
 		/* There must be outstanding requests down to media.
 		 * If so, wait till they're complete.
@@ -2052,9 +2033,27 @@ static void
 _spdk_nvmf_rdma_qp_error(void *arg)
 {
 	struct spdk_nvmf_rdma_qpair *rqpair = arg;
+	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
 
 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;
 
+	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
+		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
+	}
+
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
+
+	/* First wipe the requests waiting for buffer from the global list */
+	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
+		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+	}
+	/* Then drain the requests through the rdma queue */
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
+
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
+
 	spdk_nvmf_rdma_qp_drained(rqpair);
 }
 
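
Note (illustrative, not part of the patch): the NEED_BUFFER wipe that this
commit moves into _spdk_nvmf_rdma_qp_error() must use TAILQ_FOREACH_SAFE
because it unlinks the current element mid-iteration. Below is a minimal
standalone sketch of that pattern, simplified to a single list; the struct
and queue names ("req", "pending") are hypothetical, not SPDK's, and since
glibc's <sys/queue.h> historically lacks the _SAFE iterators, the macro is
defined here when missing.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* glibc's sys/queue.h may not provide the _SAFE variants (BSD and
 * SPDK-style trees do); supply the standard definition if absent. */
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)		\
	for ((var) = TAILQ_FIRST((head));			\
	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);	\
	     (var) = (tvar))
#endif

struct req {
	int id;
	TAILQ_ENTRY(req) link;
};

TAILQ_HEAD(req_list, req);

int main(void)
{
	struct req_list pending = TAILQ_HEAD_INITIALIZER(pending);
	struct req *r, *tmp;
	int i;

	for (i = 0; i < 4; i++) {
		r = calloc(1, sizeof(*r));
		r->id = i;
		TAILQ_INSERT_TAIL(&pending, r, link);
	}

	/* Plain TAILQ_FOREACH would read a dangling pointer after
	 * TAILQ_REMOVE; the _SAFE variant caches the next element in
	 * "tmp" first, so unlinking (and freeing) the current one
	 * is legal. */
	TAILQ_FOREACH_SAFE(r, &pending, link, tmp) {
		TAILQ_REMOVE(&pending, r, link);
		printf("aborted request %d\n", r->id);
		free(r);
	}

	return 0;
}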
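
A second illustrative sketch (hypothetical names, not SPDK API) of the
commit's reasoning: perform the abort work exactly once, on the transition
into the error state, rather than re-running it from every later drain
event. The explicit guard below stands in for the fact that
_spdk_nvmf_rdma_qp_error() only runs when the qpair first enters
SPDK_NVMF_QPAIR_ERROR, at which point no new requests can accumulate in
the drained states.

#include <stdio.h>

enum qp_state { QP_ACTIVE, QP_ERROR };

struct qp {
	enum qp_state state;
};

static void abort_all_requests(struct qp *qp)
{
	printf("qp %p: aborting all queued requests\n", (void *)qp);
}

static void qp_enter_error(struct qp *qp)
{
	if (qp->state == QP_ERROR) {
		return;	/* already handled; nothing new to abort */
	}
	qp->state = QP_ERROR;
	abort_all_requests(qp);	/* exactly once, on first entry */
}

int main(void)
{
	struct qp qp = { QP_ACTIVE };

	qp_enter_error(&qp);	/* aborts everything */
	qp_enter_error(&qp);	/* no-op on a repeated error event */
	return 0;
}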