rdma: release incomplete requests during qpair destruction

A qpair might be deleted while it still holds incomplete requests
(e.g. when the NIC is removed or when a huge number of qpairs are
destroyed simultaneously); this reduces the capacity of the transport
buffer pool. Check that the qpair's queue depth (qd) is nonzero and
process every request whose state is not FREE. Processing a request
while its qpair is being deleted causes the request to be released.

Change-Id: I0e42b5cb78f35add9f37942db77781db72c1e59c
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/676
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Alexey Marchuk 2020-02-10 10:20:56 +03:00 committed by Tomasz Zawadzki
parent 33204a4354
commit f8cbdf2c81


@@ -519,6 +519,10 @@ struct spdk_nvmf_rdma_transport {
 static inline void
 spdk_nvmf_rdma_start_disconnect(struct spdk_nvmf_rdma_qpair *rqpair);
 
+static bool
+spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
+			       struct spdk_nvmf_rdma_request *rdma_req);
+
 static inline int
 spdk_nvmf_rdma_check_ibv_state(enum ibv_qp_state state)
 {
@@ -930,10 +934,31 @@ spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 	spdk_poller_unregister(&rqpair->destruct_poller);
 
 	if (rqpair->qd != 0) {
+		struct spdk_nvmf_qpair *qpair = &rqpair->qpair;
+		struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(qpair->transport,
+				struct spdk_nvmf_rdma_transport, transport);
+		struct spdk_nvmf_rdma_request *req;
+		uint32_t i, max_req_count = 0;
+
+		SPDK_WARNLOG("Destroying qpair when queue depth is %d\n", rqpair->qd);
 		if (rqpair->srq == NULL) {
 			nvmf_rdma_dump_qpair_contents(rqpair);
+			max_req_count = rqpair->max_queue_depth;
+		} else if (rqpair->poller && rqpair->resources) {
+			max_req_count = rqpair->poller->max_srq_depth;
 		}
-		SPDK_WARNLOG("Destroying qpair when queue depth is %d\n", rqpair->qd);
+
+		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Release incomplete requests\n");
+		for (i = 0; i < max_req_count; i++) {
+			req = &rqpair->resources->reqs[i];
+			if (req->req.qpair == qpair && req->state != RDMA_REQUEST_STATE_FREE) {
+				/* spdk_nvmf_rdma_request_process checks qpair ibv and internal state
+				 * and completes a request */
+				spdk_nvmf_rdma_request_process(rtransport, req);
+			}
+		}
+
+		assert(rqpair->qd == 0);
 	}
 
 	if (rqpair->poller) {
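
To see the shape of this cleanup in isolation, here is a minimal,
self-contained sketch of the same drain pattern. Everything in it
(mini_request, mini_qpair, process_request, drain_incomplete_requests)
is a hypothetical stand-in, not SPDK API; in the real patch the
per-request work is done by spdk_nvmf_rdma_request_process, which walks
the request's state machine until the request is released. The
ownership check on each request matters because in the SRQ case the
request pool is shared by every qpair on the poller.

/* Standalone sketch of the drain pattern above: walk a fixed request
 * pool and "process" every request that still belongs to the dying
 * queue pair and is not FREE. Hypothetical types, not SPDK code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum req_state { REQ_STATE_FREE = 0, REQ_STATE_EXECUTING };

struct mini_qpair {
	uint32_t qd;                /* queue depth: requests not yet released */
};

struct mini_request {
	struct mini_qpair *qpair;   /* owning queue pair, NULL if unassigned */
	enum req_state state;
};

/* Stand-in for the error-path behavior of spdk_nvmf_rdma_request_process:
 * completing a request returns it to the pool and drops the queue depth. */
static void
process_request(struct mini_qpair *qp, struct mini_request *req)
{
	req->state = REQ_STATE_FREE;
	req->qpair = NULL;
	qp->qd--;
}

static void
drain_incomplete_requests(struct mini_qpair *qp, struct mini_request *reqs,
			  uint32_t max_req_count)
{
	for (uint32_t i = 0; i < max_req_count; i++) {
		struct mini_request *req = &reqs[i];

		/* The pool may be shared between qpairs (SRQ case), so the
		 * ownership check keeps us from touching requests that
		 * belong to other connections. */
		if (req->qpair == qp && req->state != REQ_STATE_FREE) {
			process_request(qp, req);
		}
	}
	assert(qp->qd == 0);
}

int
main(void)
{
	struct mini_qpair qp = { .qd = 2 };
	struct mini_request pool[4] = {
		{ .qpair = &qp,  .state = REQ_STATE_EXECUTING },
		{ .qpair = NULL, .state = REQ_STATE_FREE },
		{ .qpair = &qp,  .state = REQ_STATE_EXECUTING },
		{ .qpair = NULL, .state = REQ_STATE_FREE },
	};

	drain_incomplete_requests(&qp, pool, 4);
	printf("queue depth after drain: %u\n", qp.qd);
	return 0;
}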
@@ -1992,7 +2017,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
 	rdma_req->state = RDMA_REQUEST_STATE_FREE;
 }
 
-static bool
+bool
 spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			       struct spdk_nvmf_rdma_request *rdma_req)
 {
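
Note on the last hunk: the definition drops the static qualifier while
the new forward declaration near the top of the file keeps it. In C,
once a function has been declared static, a later definition without a
storage-class specifier retains internal linkage (C11 6.2.2), so
spdk_nvmf_rdma_request_process remains private to the file.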