nvmf/rdma: Don't trigger error recovery on IBV_EVENT_SQ_DRAINED

IBV_EVENT_SQ_DRAINED can occur during both error recovery and
normal operation. We don't want to spend time sending a message
to the thread that owns the qpair and then attempting to abort
all I/O when the event wasn't triggered by an error.
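
For background, IBV_EVENT_SQ_DRAINED is the verbs async event
reported once a queue pair that was moved to the SQD (send queue
drained) state has finished completing its posted sends, and that
transition can be requested deliberately during normal operation.
A minimal, generic sketch of such a request (not SPDK code;
drain_send_queue() is a made-up name and qp is assumed to be an
established ibv_qp):

    #include <infiniband/verbs.h>

    /* Ask the device to drain the send queue of an established QP.
     * When the transition to SQD completes, IBV_EVENT_SQ_DRAINED is
     * reported through ibv_get_async_event().
     */
    static int
    drain_send_queue(struct ibv_qp *qp)
    {
        struct ibv_qp_attr attr = {
            .qp_state = IBV_QPS_SQD,
        };

        /* Only the QP state is being modified. */
        return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
    }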

The case where this occurs during an error is very rare and
happens only in response to a user forcing the qpair state to
ERR from the SQD state. For now, don't handle that case at all;
it will be addressed in a later patch.
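
Concretely, after this patch only QP_FATAL and LAST_WQE_REACHED
are forwarded to the thread that owns the qpair for error
handling, and SQ_DRAINED is no longer special-cased. A
hypothetical helper that distills the new switch statement in
the diff below (qp_event_triggers_error_handling() is not an
SPDK function, just an illustration):

    #include <infiniband/verbs.h>
    #include <stdbool.h>

    /* QP async events that the handler maps onto the error/drain
     * path after this change; IBV_EVENT_SQ_DRAINED is intentionally
     * absent. Events below the visible part of the diff may have
     * their own handling.
     */
    static bool
    qp_event_triggers_error_handling(enum ibv_event_type type)
    {
        switch (type) {
        case IBV_EVENT_QP_FATAL:
        case IBV_EVENT_QP_LAST_WQE_REACHED:
            return true;
        default:
            return false;
        }
    }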

Change-Id: I16462ca52739b68f6b52a963f7344e12f7f48a55
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/420936
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>

@@ -2050,25 +2050,6 @@ spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 	}
 }
 
-static void
-_spdk_nvmf_rdma_sq_drained(void *cb_arg)
-{
-	spdk_nvmf_rdma_qp_drained(cb_arg);
-}
-
-static void
-_spdk_nvmf_rdma_qp_last_wqe(void *cb_arg)
-{
-	struct spdk_nvmf_rdma_qpair *rqpair = cb_arg;
-
-	if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ERROR) {
-		SPDK_ERRLOG("QP#%u is not in ERROR state, dropping LAST_WQE event...\n",
-			    rqpair->qpair.qid);
-		return;
-	}
-
-	spdk_nvmf_rdma_qp_drained(rqpair);
-}
-
 static void
 _spdk_nvmf_rdma_qp_error(void *arg)
 {
@@ -2076,24 +2057,15 @@ _spdk_nvmf_rdma_qp_error(void *arg)
 
 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;
 
-	if (spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
-		/* There are no outstanding requests */
-		spdk_nvmf_rdma_qp_drained(rqpair);
-	}
-}
-
-static struct spdk_nvmf_rdma_qpair *
-spdk_nvmf_rqpair_from_qp(struct ibv_qp *qp)
-{
-	return qp->qp_context;
+	spdk_nvmf_rdma_qp_drained(rqpair);
 }
 
 static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
-	int rc;
+	int				rc;
 	struct spdk_nvmf_rdma_qpair	*rqpair;
-	struct ibv_async_event event;
+	struct ibv_async_event		event;
 
 	rc = ibv_get_async_event(device->context, &event);
@@ -2106,19 +2078,14 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	SPDK_NOTICELOG("Async event: %s\n",
 		       ibv_event_type_str(event.event_type));
 
+	rqpair = event.element.qp->qp_context;
+
 	switch (event.event_type) {
 	case IBV_EVENT_QP_FATAL:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
+	case IBV_EVENT_QP_LAST_WQE_REACHED:
 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
 		break;
-	case IBV_EVENT_SQ_DRAINED:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
-		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_sq_drained, rqpair);
-		break;
-	case IBV_EVENT_QP_LAST_WQE_REACHED:
-		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
-		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
-		break;
 	case IBV_EVENT_CQ_ERR:
 	case IBV_EVENT_QP_REQ_ERR:
 	case IBV_EVENT_QP_ACCESS_ERR: