rdma: destroy qpairs based on num_outstanding_wr.
Instead of posting dummy drain work requests and waiting for them to flush, destroy a disconnected qpair once all of its outstanding work requests have completed. Both Mellanox and Soft-RoCE NICs work with this approach.

Change-Id: I7b05e54037761c4d5e58484e1c55934c47ac1ab9
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/446134
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 4a5e97c9d0
commit bb3e441388
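The idea behind the change: rather than posting dummy RDMA_WR_TYPE_DRAIN_SEND/RDMA_WR_TYPE_DRAIN_RECV work requests and waiting for both to flush, the transport now tracks how many work requests are outstanding on each qpair and frees the qpair once they have all completed. Below is a minimal sketch of that bookkeeping, under the assumption that current_recv_depth counts receives that have completed and not yet been re-posted; struct qpair_depths, the on_* helpers, and main are made up for illustration, and only the field names and the drained predicate come from the diff.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the qpair's outstanding-WR accounting. */
struct qpair_depths {
    uint32_t max_queue_depth;
    uint32_t current_send_depth;   /* send/data WRs posted but not yet completed */
    uint32_t current_recv_depth;   /* recvs completed (consumed or flushed) and not re-posted */
};

static void on_send_posted(struct qpair_depths *q)    { q->current_send_depth++; }
static void on_send_completed(struct qpair_depths *q) { assert(q->current_send_depth > 0); q->current_send_depth--; }
static void on_recv_completed(struct qpair_depths *q) { q->current_recv_depth++; }
static void on_recv_reposted(struct qpair_depths *q)  { assert(q->current_recv_depth > 0); q->current_recv_depth--; }

/* Same predicate the new spdk_nvmf_rdma_destroy_drained_qpair() uses:
 * nothing left on the send queue, and every receive slot has drained back. */
static bool qpair_is_drained(const struct qpair_depths *q)
{
    return q->current_send_depth == 0 && q->current_recv_depth == q->max_queue_depth;
}

int main(void)
{
    struct qpair_depths q = { .max_queue_depth = 4 };

    /* Once the QP is moved to IBV_QPS_ERR, every posted WR comes back as a
     * (flushed) completion and nothing is re-posted, so the counters converge. */
    on_send_posted(&q);
    on_send_completed(&q);
    for (uint32_t i = 0; i < q.max_queue_depth; i++) {
        on_recv_completed(&q);
    }
    (void)on_recv_reposted;

    printf("drained: %s\n", qpair_is_drained(&q) ? "yes" : "no");
    return 0;
}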
@@ -197,8 +197,6 @@ enum spdk_nvmf_rdma_wr_type {
 	RDMA_WR_TYPE_RECV,
 	RDMA_WR_TYPE_SEND,
 	RDMA_WR_TYPE_DATA,
-	RDMA_WR_TYPE_DRAIN_SEND,
-	RDMA_WR_TYPE_DRAIN_RECV
 };
 
 struct spdk_nvmf_rdma_wr {

@@ -343,8 +341,6 @@ struct spdk_nvmf_rdma_qpair {
 	struct ibv_qp_attr		ibv_attr;
 
 	uint32_t			disconnect_flags;
-	struct spdk_nvmf_rdma_wr	drain_send_wr;
-	struct spdk_nvmf_rdma_wr	drain_recv_wr;
 
 	/* Poller registered in case the qpair doesn't properly
 	 * complete the qpair destruct process and becomes defunct.

@@ -2235,6 +2231,16 @@ spdk_nvmf_rdma_start_disconnect(struct spdk_nvmf_rdma_qpair *rqpair)
 	}
 }
 
+static void spdk_nvmf_rdma_destroy_drained_qpair(struct spdk_nvmf_rdma_qpair *rqpair,
+		struct spdk_nvmf_rdma_transport *rtransport)
+{
+	if (rqpair->current_send_depth == 0 && rqpair->current_recv_depth == rqpair->max_queue_depth) {
+		/* The qpair has been drained. Free the resources. */
+		spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
+		spdk_nvmf_rdma_qpair_destroy(rqpair);
+	}
+}
+
 
 static int
 nvmf_rdma_disconnect(struct rdma_cm_event *evt)

@@ -2686,11 +2692,6 @@ static void
 spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
 {
 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-	struct ibv_recv_wr recv_wr = {};
-	struct ibv_recv_wr *bad_recv_wr;
-	struct ibv_send_wr send_wr = {};
-	struct ibv_send_wr *bad_send_wr;
-	int rc;
 
 	if (rqpair->disconnect_flags & RDMA_QP_DISCONNECTING) {
 		return;

@@ -2712,26 +2713,6 @@ spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
 		spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR);
 	}
 
-	rqpair->drain_recv_wr.type = RDMA_WR_TYPE_DRAIN_RECV;
-	recv_wr.wr_id = (uintptr_t)&rqpair->drain_recv_wr;
-	rc = ibv_post_recv(rqpair->cm_id->qp, &recv_wr, &bad_recv_wr);
-	if (rc) {
-		SPDK_ERRLOG("Failed to post dummy receive WR, errno %d\n", errno);
-		assert(false);
-		return;
-	}
-
-	rqpair->drain_send_wr.type = RDMA_WR_TYPE_DRAIN_SEND;
-	send_wr.wr_id = (uintptr_t)&rqpair->drain_send_wr;
-	send_wr.opcode = IBV_WR_SEND;
-	rc = ibv_post_send(rqpair->cm_id->qp, &send_wr, &bad_send_wr);
-	if (rc) {
-		SPDK_ERRLOG("Failed to post dummy send WR, errno %d\n", errno);
-		assert(false);
-		return;
-	}
-	rqpair->current_send_depth++;
-
 	rqpair->destruct_poller = spdk_poller_register(spdk_nvmf_rdma_destroy_defunct_qpair, (void *)rqpair,
 				  NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US);
 }

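With the dummy drain WRs removed, spdk_nvmf_rdma_close_qpair only has to push the QP into IBV_QPS_ERR; the verbs layer then completes every outstanding work request with IBV_WC_WR_FLUSH_ERR, and the normal completion-queue poll loop does the draining. A rough sketch of how such flushed completions can feed the counters is below; drain_cq, struct depths, and the wr_id tags are illustrative, not the actual spdk_nvmf_rdma_poller_poll code, which reads the WR type from a struct spdk_nvmf_rdma_wr pointed to by wr_id.

#include <infiniband/verbs.h>
#include <stdint.h>

/* Hypothetical tags stashed in wr_id when the WRs were posted. */
enum { SKETCH_WR_RECV = 0, SKETCH_WR_SEND = 1 };

struct depths {
    uint32_t current_send_depth;
    uint32_t current_recv_depth;
};

/* Illustrative only: account for completions, including the flushed ones
 * (IBV_WC_WR_FLUSH_ERR) generated once the QP is in IBV_QPS_ERR. On error
 * completions only wr_id and status are valid, so the WR type must come
 * from wr_id rather than from wc.opcode. */
static void drain_cq(struct ibv_cq *cq, struct depths *d)
{
    struct ibv_wc wc[32];
    int n, i;

    while ((n = ibv_poll_cq(cq, 32, wc)) > 0) {
        for (i = 0; i < n; i++) {
            if (wc[i].wr_id == SKETCH_WR_RECV) {
                d->current_recv_depth++;   /* recv slot drained, never re-posted */
            } else {
                d->current_send_depth--;   /* outstanding send/data WR retired */
            }
        }
    }
}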
@@ -2821,31 +2802,6 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			}
 			rqpair->current_send_depth--;
 			break;
-		case RDMA_WR_TYPE_DRAIN_RECV:
-			rqpair = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_qpair, drain_recv_wr);
-			assert(rqpair->disconnect_flags & RDMA_QP_DISCONNECTING);
-			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Drained QP RECV %u (%p)\n", rqpair->qpair.qid, rqpair);
-			rqpair->disconnect_flags |= RDMA_QP_RECV_DRAINED;
-			assert(rqpair->current_recv_depth == rqpair->max_queue_depth);
-			/* Don't worry about responding to recv overflow, we are disconnecting anyways */
-			if (rqpair->disconnect_flags & RDMA_QP_SEND_DRAINED) {
-				spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
-				spdk_nvmf_rdma_qpair_destroy(rqpair);
-			}
-			/* Continue so that this does not trigger the disconnect path below. */
-			continue;
-		case RDMA_WR_TYPE_DRAIN_SEND:
-			rqpair = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_qpair, drain_send_wr);
-			assert(rqpair->disconnect_flags & RDMA_QP_DISCONNECTING);
-			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Drained QP SEND %u (%p)\n", rqpair->qpair.qid, rqpair);
-			rqpair->disconnect_flags |= RDMA_QP_SEND_DRAINED;
-			rqpair->current_send_depth--;
-			if (rqpair->disconnect_flags & RDMA_QP_RECV_DRAINED) {
-				spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
-				spdk_nvmf_rdma_qpair_destroy(rqpair);
-			}
-			/* Continue so that this does not trigger the disconnect path below. */
-			continue;
 		default:
 			SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
 			continue;

@@ -2854,6 +2810,8 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
 				/* Disconnect the connection. */
 				spdk_nvmf_rdma_start_disconnect(rqpair);
+			} else {
+				spdk_nvmf_rdma_destroy_drained_qpair(rqpair, rtransport);
 			}
 			continue;
 		}

@@ -2927,6 +2885,10 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
 			continue;
 		}
+
+		if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
+			spdk_nvmf_rdma_destroy_drained_qpair(rqpair, rtransport);
+		}
 	}
 
 	if (error == true) {
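The destruct_poller armed in spdk_nvmf_rdma_close_qpair above is the safety net for the new scheme: if the flushed completions never arrive and the counters never reach the drained condition, the poller fires after NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US and destroys the qpair anyway. A hedged sketch of that fallback shape using the public SPDK poller API follows; the context struct and callback body here are illustrative, while the real callback referenced by the diff is spdk_nvmf_rdma_destroy_defunct_qpair in the same file.

#include "spdk/log.h"
#include "spdk/thread.h"

/* Hypothetical stand-in for the qpair; the real callback receives the
 * struct spdk_nvmf_rdma_qpair that never finished draining. */
struct defunct_ctx {
    struct spdk_poller *destruct_poller;
};

/* Sketch of the timeout fallback: if the flushed completions never show up,
 * this poller fires and tears the qpair down unconditionally. */
static int
sketch_destroy_defunct_qpair(void *arg)
{
    struct defunct_ctx *ctx = arg;

    SPDK_ERRLOG("qpair %p failed to drain within the timeout; destroying it\n", ctx);
    spdk_poller_unregister(&ctx->destruct_poller);
    /* ... call the transport's qpair destroy here ... */
    return -1;
}

/* Armed right after the QP is moved to IBV_QPS_ERR, as in the hunk above:
 *
 *   rqpair->destruct_poller = spdk_poller_register(sketch_destroy_defunct_qpair,
 *                                                  rqpair, NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US);
 */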