nvmf/rdma: Only abort all requests when first entering error state

There is no need to keep attempting to abort all requests later on; by
that point there won't be any requests left in these other states.

Change-Id: I7b12e10b87e0d0bb4a74fdf67fb278b443e70e8a
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/421042
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Author: Ben Walker, 2018-08-01 16:05:40 -07:00; committed by Jim Harris
Parent: d0d3dc4e8b
Commit: 12444f400d


@@ -2004,27 +2004,8 @@ spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
 static void
 spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 {
-	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
-
 	SPDK_NOTICELOG("IBV QP#%u drained\n", rqpair->qpair.qid);
 
-	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
-		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
-	}
-
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
-
-	/* First wipe the requests waiting for buffer from the global list */
-	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
-		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
-	}
-	/* Then drain the requests through the rdma queue */
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
-
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
-
 	if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
 		/* There must be outstanding requests down to media.
 		 * If so, wait till they're complete.
@@ -2052,9 +2033,27 @@ static void
 _spdk_nvmf_rdma_qp_error(void *arg)
 {
 	struct spdk_nvmf_rdma_qpair *rqpair = arg;
+	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
 
 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;
 
+	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
+		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
+	}
+
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
+
+	/* First wipe the requests waiting for buffer from the global list */
+	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
+		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+	}
+	/* Then drain the requests through the rdma queue */
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
+
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
+
 	spdk_nvmf_rdma_qp_drained(rqpair);
 }
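
Note: the diff relies on a per-state drain helper, spdk_nvmf_rdma_drain_state_queue, whose body is not shown here. The standalone sketch below only illustrates the general pattern such a helper follows: walk the list of requests parked in one state and force them into a terminal state, which the error path above now does exactly once, on entry to SPDK_NVMF_QPAIR_ERROR. Every type and function name in the sketch (struct qpair, struct request, drain_state_queue, the req_state values) is an illustrative stand-in, not SPDK's actual definition.

/*
 * Illustrative sketch only -- not SPDK code.  It mirrors the shape of the
 * per-state drain used above: walk the requests parked in one state and
 * force each of them to a terminal state when the qpair errors out.
 */
#include <stdio.h>
#include <sys/queue.h>

enum req_state { REQ_NEED_BUFFER, REQ_TRANSFERRING, REQ_COMPLETING, REQ_COMPLETED, REQ_NUM_STATES };

struct request {
	enum req_state state;
	TAILQ_ENTRY(request) link;
};

TAILQ_HEAD(req_list, request);

struct qpair {
	/* One list per request state, loosely mirroring rqpair->state_queue[] in the diff. */
	struct req_list state_queue[REQ_NUM_STATES];
};

/* Move every request currently parked in 'state' to REQ_COMPLETED. */
static void drain_state_queue(struct qpair *qp, enum req_state state)
{
	struct request *req;

	while ((req = TAILQ_FIRST(&qp->state_queue[state])) != NULL) {
		TAILQ_REMOVE(&qp->state_queue[state], req, link);
		req->state = REQ_COMPLETED;
		TAILQ_INSERT_TAIL(&qp->state_queue[REQ_COMPLETED], req, link);
	}
}

int main(void)
{
	struct qpair qp;
	struct request reqs[2];
	struct request *req;
	int completed = 0;

	for (int i = 0; i < REQ_NUM_STATES; i++) {
		TAILQ_INIT(&qp.state_queue[i]);
	}

	/* Park two requests in non-terminal states, then drain each state once. */
	reqs[0].state = REQ_NEED_BUFFER;
	TAILQ_INSERT_TAIL(&qp.state_queue[REQ_NEED_BUFFER], &reqs[0], link);
	reqs[1].state = REQ_COMPLETING;
	TAILQ_INSERT_TAIL(&qp.state_queue[REQ_COMPLETING], &reqs[1], link);

	drain_state_queue(&qp, REQ_NEED_BUFFER);
	drain_state_queue(&qp, REQ_TRANSFERRING);
	drain_state_queue(&qp, REQ_COMPLETING);

	TAILQ_FOREACH(req, &qp.state_queue[REQ_COMPLETED], link) {
		completed++;
	}
	printf("%d requests force-completed\n", completed);
	return 0;
}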