diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 2d5c324e6..aa11d5dcf 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -330,6 +330,15 @@ struct spdk_nvmf_rdma_resources {
 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	free_queue;
 };
 
+typedef void (*spdk_nvmf_rdma_qpair_ibv_event)(struct spdk_nvmf_rdma_qpair *rqpair);
+
+struct spdk_nvmf_rdma_ibv_event_ctx {
+	struct spdk_nvmf_rdma_qpair			*rqpair;
+	spdk_nvmf_rdma_qpair_ibv_event			cb_fn;
+	/* Link to other ibv events associated with this qpair */
+	STAILQ_ENTRY(spdk_nvmf_rdma_ibv_event_ctx)	link;
+};
+
 struct spdk_nvmf_rdma_qpair {
 	struct spdk_nvmf_qpair			qpair;
 
@@ -399,6 +408,9 @@ struct spdk_nvmf_rdma_qpair {
 
 	struct spdk_poller			*destruct_poller;
 
+	/* List of ibv async events */
+	STAILQ_HEAD(, spdk_nvmf_rdma_ibv_event_ctx)	ibv_events;
+
 	/* There are several ways a disconnect can start on a qpair
 	 * and they are not all mutually exclusive. It is important
 	 * that we only initialize one of these paths.
@@ -898,6 +910,17 @@ cleanup:
 	return NULL;
 }
 
+static void
+spdk_nvmf_rdma_qpair_clean_ibv_events(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+	struct spdk_nvmf_rdma_ibv_event_ctx *ctx, *tctx;
+	STAILQ_FOREACH_SAFE(ctx, &rqpair->ibv_events, link, tctx) {
+		ctx->rqpair = NULL;
+		/* Memory allocated for ctx is freed in spdk_nvmf_rdma_qpair_process_ibv_event */
+		STAILQ_REMOVE(&rqpair->ibv_events, ctx, spdk_nvmf_rdma_ibv_event_ctx, link);
+	}
+}
+
 static void
 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 {
@@ -948,6 +971,8 @@ spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 		nvmf_rdma_resources_destroy(rqpair->resources);
 	}
 
+	spdk_nvmf_rdma_qpair_clean_ibv_events(rqpair);
+
 	free(rqpair);
 }
 
@@ -1331,6 +1356,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	rqpair->cm_id = event->id;
 	rqpair->listen_id = event->listen_id;
 	rqpair->qpair.transport = transport;
+	STAILQ_INIT(&rqpair->ibv_events);
 	/* use qid from the private data to determine the qpair type
 	   qid will be set to the appropriate value when the controller is created */
 	rqpair->qpair.qid = private_data->qid;
@@ -2932,15 +2958,6 @@ static const char *CM_EVENT_STR[] = {
 };
 #endif /* DEBUG */
 
-static void
-nvmf_rdma_handle_last_wqe_reached(void *ctx)
-{
-	struct spdk_nvmf_rdma_qpair *rqpair = ctx;
-	rqpair->last_wqe_reached = true;
-
-	nvmf_rdma_destroy_drained_qpair(rqpair);
-}
-
 static void
 spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
 {
@@ -3021,6 +3038,50 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 	}
 }
 
+static void
+nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+	rqpair->last_wqe_reached = true;
+	nvmf_rdma_destroy_drained_qpair(rqpair);
+}
+
+static void
+spdk_nvmf_rdma_qpair_process_ibv_event(void *ctx)
+{
+	struct spdk_nvmf_rdma_ibv_event_ctx *event_ctx = ctx;
+
+	if (event_ctx->rqpair) {
+		STAILQ_REMOVE(&event_ctx->rqpair->ibv_events, event_ctx, spdk_nvmf_rdma_ibv_event_ctx, link);
+		if (event_ctx->cb_fn) {
+			event_ctx->cb_fn(event_ctx->rqpair);
+		}
+	}
+	free(event_ctx);
+}
+
+static int
+spdk_nvmf_rdma_send_qpair_async_event(struct spdk_nvmf_rdma_qpair *rqpair,
+				      spdk_nvmf_rdma_qpair_ibv_event fn)
+{
+	struct spdk_nvmf_rdma_ibv_event_ctx *ctx;
+
+	if (!rqpair->qpair.group) {
+		return EINVAL;
+	}
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (!ctx) {
+		return ENOMEM;
+	}
+
+	ctx->rqpair = rqpair;
+	ctx->cb_fn = fn;
+	STAILQ_INSERT_TAIL(&rqpair->ibv_events, ctx, link);
+
+	return spdk_thread_send_msg(rqpair->qpair.group->thread, spdk_nvmf_rdma_qpair_process_ibv_event,
+				    ctx);
+}
+
 static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
@@ -3050,14 +3111,10 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		/* This event only occurs for shared receive queues. */
 		rqpair = event.element.qp->qp_context;
 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Last WQE reached event received for rqpair %p\n", rqpair);
-		/* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
-		if (rqpair->qpair.group) {
-			spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_handle_last_wqe_reached, rqpair);
-		} else {
-			SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
+		if (spdk_nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_last_wqe_reached)) {
+			SPDK_ERRLOG("Failed to send LAST_WQE_REACHED event for rqpair %p\n", rqpair);
 			rqpair->last_wqe_reached = true;
 		}
-
		break;
 	case IBV_EVENT_SQ_DRAINED:
 		/* This event occurs frequently in both error and non-error states.
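Reviewer note on the lifetime pattern above: spdk_thread_send_msg() may deliver the message after the qpair has been destroyed, so the patch hands ownership of each in-flight IBV event to a heap-allocated context. The qpair destructor (spdk_nvmf_rdma_qpair_clean_ibv_events) only severs the back-pointer, and spdk_nvmf_rdma_qpair_process_ibv_event() is the single place that frees the context, on every path, so a late delivery is dropped instead of dereferencing freed memory. The following is a minimal, self-contained C sketch of that pattern; all names in it (owner, event_ctx, process_event, owner_destroy, on_event) are hypothetical stand-ins, not SPDK APIs, and the deferred delivery is simulated by calling the handler directly.

/*
 * Sketch (hypothetical names, not SPDK API) of the deferred-event
 * ownership pattern used in this patch. Build: cc -o sketch sketch.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* glibc's <sys/queue.h> may lack the _SAFE iteration variant; provide it. */
#ifndef STAILQ_FOREACH_SAFE
#define STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	     (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	     (var) = (tvar))
#endif

struct owner;

struct event_ctx {
	struct owner *owner;	/* cleared if the owner is destroyed first */
	void (*cb_fn)(struct owner *);
	STAILQ_ENTRY(event_ctx) link;
};

struct owner {
	STAILQ_HEAD(, event_ctx) events;	/* contexts still in flight */
};

/* Deferred handler: the only place the context is freed, on every path. */
static void
process_event(void *arg)
{
	struct event_ctx *ctx = arg;

	if (ctx->owner != NULL) {	/* owner still alive: run the callback */
		STAILQ_REMOVE(&ctx->owner->events, ctx, event_ctx, link);
		ctx->cb_fn(ctx->owner);
	}				/* owner already gone: silently drop */
	free(ctx);
}

/* Destructor: detach in-flight contexts rather than freeing them. */
static void
owner_destroy(struct owner *o)
{
	struct event_ctx *ctx, *tmp;

	STAILQ_FOREACH_SAFE(ctx, &o->events, link, tmp) {
		ctx->owner = NULL;	/* process_event() will just free it */
		STAILQ_REMOVE(&o->events, ctx, event_ctx, link);
	}
	free(o);
}

static void
on_event(struct owner *o)
{
	printf("callback ran, owner %p alive\n", (void *)o);
}

int
main(void)
{
	struct owner *o = calloc(1, sizeof(*o));
	struct event_ctx *ctx = calloc(1, sizeof(*ctx));

	if (o == NULL || ctx == NULL) {
		return 1;
	}
	STAILQ_INIT(&o->events);
	ctx->owner = o;
	ctx->cb_fn = on_event;
	STAILQ_INSERT_TAIL(&o->events, ctx, link);

	owner_destroy(o);	/* owner dies before the message is handled */
	process_event(ctx);	/* late delivery: dropped, ctx freed, no use-after-free */
	return 0;
}

The same reasoning explains why the destructor must not free detached contexts itself: the message already queued on the poll-group thread still holds the pointer, so freeing it there would reintroduce the use-after-free the patch removes.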