nvmf/rdma: Ignore async_event if its qp_context is NULL

If the initiator and target run in the same application and the
initiator uses an SRQ, the target may unexpectedly receive async
events that belong to the initiator, e.g.,
IBV_EVENT_QP_LAST_WQE_REACHED.

The reason is that the initiator and target may use the same device
simultaneously, while only the target polls for async events.
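
For reference, this polling is the standard libibverbs async-event
loop. A minimal sketch per device context (the function name and the
blocking style are illustrative; SPDK itself polls the context's
async fd non-blockingly from a poller):

#include <infiniband/verbs.h>
#include <stdio.h>

/* Sketch: drain async events for one device context. Because a
 * co-located initiator shares the same struct ibv_context, its QP
 * events arrive on this queue even though only the target polls it. */
static void
poll_async_events(struct ibv_context *verbs)
{
	struct ibv_async_event event;

	/* Blocks until an event arrives; real code typically makes
	 * the context's async_fd non-blocking instead. */
	if (ibv_get_async_event(verbs, &event) != 0) {
		return;
	}

	printf("async event: %s\n", ibv_event_type_str(event.event_type));

	/* Every retrieved event must be acknowledged, otherwise later
	 * ibv_destroy_qp()/ibv_close_device() calls can block. */
	ibv_ack_async_event(&event);
}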

The target sets attr.qp_context to rqpair when creating a QP, whereas
the initiator sets attr.qp_context to NULL.

Hence one simple fix is to ignore async events whose qp_context is
NULL.
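
The check is reliable because the verbs layer hands qp_context back
unchanged in event.element.qp. A hedged sketch of both sides, using
simplified stand-ins (target_create_qp, handle_qp_event, and the bare
void *rqpair are illustrative, not SPDK's actual types):

#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>
#include <stddef.h>

/* Target side: stash the qpair pointer at QP creation so async events
 * can be mapped back to it later. */
static int
target_create_qp(struct rdma_cm_id *cm_id, struct ibv_pd *pd, void *rqpair)
{
	struct ibv_qp_init_attr attr = {};

	attr.qp_context = rqpair;	/* non-NULL: this QP is the target's */
	attr.qp_type = IBV_QPT_RC;
	/* cap, send_cq, recv_cq setup elided for brevity */

	return rdma_create_qp(cm_id, pd, &attr);
}

/* Event side: a QP created with qp_context == NULL (e.g., by the
 * co-located initiator) is not ours, so its events are skipped. */
static void
handle_qp_event(struct ibv_async_event *event)
{
	void *rqpair = event->element.qp->qp_context;

	if (rqpair == NULL) {
		/* QP belongs to the initiator; ignore the event. */
		return;
	}
	/* dispatch to the owning rqpair ... */
}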

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Id9ead1934f0b2ad1e18b174d2df2f1bf9853f7e1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14297
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Dong Yi <dongx.yi@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Author: Shuhei Matsumoto, 2022-09-02 14:06:07 +09:00 (committed by Tomasz Zawadzki)
parent 0e4b13dc53
commit b3e1db32a3


@@ -3239,7 +3239,23 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 
 	switch (event.event_type) {
 	case IBV_EVENT_QP_FATAL:
+	case IBV_EVENT_QP_LAST_WQE_REACHED:
+	case IBV_EVENT_SQ_DRAINED:
+	case IBV_EVENT_QP_REQ_ERR:
+	case IBV_EVENT_QP_ACCESS_ERR:
+	case IBV_EVENT_COMM_EST:
+	case IBV_EVENT_PATH_MIG:
+	case IBV_EVENT_PATH_MIG_ERR:
 		rqpair = event.element.qp->qp_context;
+		if (!rqpair) {
+			/* Any QP event for NVMe-RDMA initiator may be returned. */
+			SPDK_NOTICELOG("Async QP event for unknown QP: %s\n",
+				       ibv_event_type_str(event.event_type));
+			break;
+		}
+
+		switch (event.event_type) {
+		case IBV_EVENT_QP_FATAL:
 		SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
 				  (uintptr_t)rqpair, event.event_type);
@@ -3248,7 +3264,6 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		break;
 	case IBV_EVENT_QP_LAST_WQE_REACHED:
 		/* This event only occurs for shared receive queues. */
-		rqpair = event.element.qp->qp_context;
 		SPDK_DEBUGLOG(rdma, "Last WQE reached event received for rqpair %p\n", rqpair);
 		rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_last_wqe_reached);
 		if (rc) {
@@ -3259,7 +3274,6 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	case IBV_EVENT_SQ_DRAINED:
 		/* This event occurs frequently in both error and non-error states.
 		 * Check if the qpair is in an error state before sending a message. */
-		rqpair = event.element.qp->qp_context;
 		SPDK_DEBUGLOG(rdma, "Last sq drained event received for rqpair %p\n", rqpair);
 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
 				  (uintptr_t)rqpair, event.event_type);
@@ -3272,13 +3286,16 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	case IBV_EVENT_COMM_EST:
 	case IBV_EVENT_PATH_MIG:
 	case IBV_EVENT_PATH_MIG_ERR:
-		SPDK_NOTICELOG("Async event: %s\n",
+		SPDK_NOTICELOG("Async QP event: %s\n",
			       ibv_event_type_str(event.event_type));
-		rqpair = event.element.qp->qp_context;
 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
 				  (uintptr_t)rqpair, event.event_type);
 		nvmf_rdma_update_ibv_state(rqpair);
 		break;
+	default:
+		break;
+	}
+	break;
 	case IBV_EVENT_CQ_ERR:
 	case IBV_EVENT_DEVICE_FATAL:
 	case IBV_EVENT_PORT_ACTIVE: