From a9b9f0952d6a0c1a37e544ef2977e7db136a8e86 Mon Sep 17 00:00:00 2001
From: Ben Walker
Date: Tue, 31 Jul 2018 15:10:32 -0700
Subject: [PATCH] nvmf/rdma: Don't trigger error recovery on
 IBV_EVENT_SQ_DRAINED

IBV_EVENT_SQ_DRAINED can occur during both error recovery and normal
operation. We don't want to spend time sending a message to the correct
qpair thread and then attempting to abort all I/O in the case where this
wasn't triggered by an error.

The case where this occurs during an error is very rare and only in
response to a user forcing the state to err from the sqd state. For now,
don't handle that case at all. Handle that corner case in a later patch.

Change-Id: I16462ca52739b68f6b52a963f7344e12f7f48a55
Signed-off-by: Ben Walker
Reviewed-on: https://review.gerrithub.io/420936
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
Reviewed-by: Jim Harris
Reviewed-by: Changpeng Liu
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Seth Howell
---
 lib/nvmf/rdma.c | 45 ++++++---------------------------------------
 1 file changed, 6 insertions(+), 39 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index bb4244c49..720883ff6 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -2050,25 +2050,6 @@ spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 	}
 }
 
-static void
-_spdk_nvmf_rdma_sq_drained(void *cb_arg)
-{
-	spdk_nvmf_rdma_qp_drained(cb_arg);
-}
-
-static void
-_spdk_nvmf_rdma_qp_last_wqe(void *cb_arg)
-{
-	struct spdk_nvmf_rdma_qpair *rqpair = cb_arg;
-
-	if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ERROR) {
-		SPDK_ERRLOG("QP#%u is not in ERROR state, dropping LAST_WQE event...\n",
-			    rqpair->qpair.qid);
-		return;
-	}
-	spdk_nvmf_rdma_qp_drained(rqpair);
-}
-
 static void
 _spdk_nvmf_rdma_qp_error(void *arg)
 {
@@ -2076,24 +2057,15 @@ _spdk_nvmf_rdma_qp_error(void *arg)
 
 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;
 
-	if (spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
-		/* There are no outstanding requests */
-		spdk_nvmf_rdma_qp_drained(rqpair);
-	}
-}
-
-static struct spdk_nvmf_rdma_qpair *
-spdk_nvmf_rqpair_from_qp(struct ibv_qp *qp)
-{
-	return qp->qp_context;
+	spdk_nvmf_rdma_qp_drained(rqpair);
 }
 
 static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
-	int			rc;
+	int rc;
 	struct spdk_nvmf_rdma_qpair	*rqpair;
-	struct ibv_async_event	event;
+	struct ibv_async_event event;
 
 	rc = ibv_get_async_event(device->context, &event);
 
@@ -2106,19 +2078,14 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	SPDK_NOTICELOG("Async event: %s\n",
 		       ibv_event_type_str(event.event_type));
 
+	rqpair = event.element.qp->qp_context;
+
 	switch (event.event_type) {
 	case IBV_EVENT_QP_FATAL:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
+	case IBV_EVENT_QP_LAST_WQE_REACHED:
 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
 		break;
 	case IBV_EVENT_SQ_DRAINED:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
-		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_sq_drained, rqpair);
-		break;
-	case IBV_EVENT_QP_LAST_WQE_REACHED:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
-		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
-		break;
 	case IBV_EVENT_CQ_ERR:
 	case IBV_EVENT_QP_REQ_ERR:
 	case IBV_EVENT_QP_ACCESS_ERR: