From e7beb0d1fd4e41864621af60b3a99a6faeaa1ba5 Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Tue, 5 Feb 2019 14:07:36 -0700
Subject: [PATCH] nvme_rdma: don't put req until both send and recv have
 completed

This prevents us from overrunning the send queue.

Change-Id: I6afbd9e2ba0ff266eb8fee2ae0361ac89fad7f81
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/c/443476
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Jim Harris
---
 lib/nvme/nvme_rdma.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/lib/nvme/nvme_rdma.c b/lib/nvme/nvme_rdma.c
index 901b0ee9e..ec761cb85 100644
--- a/lib/nvme/nvme_rdma.c
+++ b/lib/nvme/nvme_rdma.c
@@ -141,6 +141,8 @@ struct spdk_nvme_rdma_req {
 	struct ibv_sge send_sgl[NVME_RDMA_DEFAULT_TX_SGE];
 
 	TAILQ_ENTRY(spdk_nvme_rdma_req) link;
+
+	bool request_ready_to_put;
 };
 
 static const char *rdma_cm_event_str[] = {
@@ -198,6 +200,7 @@ nvme_rdma_req_get(struct nvme_rdma_qpair *rqpair)
 static void
 nvme_rdma_req_put(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
 {
+	rdma_req->request_ready_to_put = false;
 	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 	TAILQ_INSERT_HEAD(&rqpair->free_reqs, rdma_req, link);
 }
@@ -491,7 +494,12 @@ nvme_rdma_recv(struct nvme_rdma_qpair *rqpair, uint64_t rsp_idx)
 	req = rdma_req->req;
 
 	nvme_rdma_req_complete(req, rsp);
-	nvme_rdma_req_put(rqpair, rdma_req);
+	if (rdma_req->request_ready_to_put) {
+		nvme_rdma_req_put(rqpair, rdma_req);
+	} else {
+		rdma_req->request_ready_to_put = true;
+	}
+
 	if (nvme_rdma_post_recv(rqpair, rsp_idx)) {
 		SPDK_ERRLOG("Unable to re-post rx descriptor\n");
 		return -1;
@@ -1644,11 +1652,12 @@ int
 nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 				    uint32_t max_completions)
 {
-	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
-	struct ibv_wc wc[MAX_COMPLETIONS_PER_POLL];
-	int i, rc, batch_size;
-	uint32_t reaped;
-	struct ibv_cq *cq;
+	struct nvme_rdma_qpair	*rqpair = nvme_rdma_qpair(qpair);
+	struct ibv_wc		wc[MAX_COMPLETIONS_PER_POLL];
+	int			i, rc, batch_size;
+	uint32_t		reaped;
+	struct ibv_cq		*cq;
+	struct spdk_nvme_rdma_req *rdma_req;
 
 	if (max_completions == 0) {
 		max_completions = rqpair->num_entries;
@@ -1697,6 +1706,13 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 				break;
 
 			case IBV_WC_SEND:
+				rdma_req = (struct spdk_nvme_rdma_req *)wc[i].wr_id;
+
+				if (rdma_req->request_ready_to_put) {
+					nvme_rdma_req_put(rqpair, rdma_req);
+				} else {
+					rdma_req->request_ready_to_put = true;
+				}
 				break;
 
 			default:
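
The hunks above implement a two-completion gate: a request that posted a send work request must not be recycled until both the IBV_WC_SEND completion and the matching response have been seen, in either order, so the request's send-queue slot is never reused while still outstanding. The standalone C sketch below shows just that gating pattern in isolation. It is illustrative only and not part of the patch; every name in it (toy_req, complete_one_side, on_send_complete, on_recv_complete, req_put) is hypothetical and is not SPDK API.

/*
 * Illustrative sketch (not part of the patch): recycle a request only after
 * BOTH its send completion and its response completion have arrived.
 * All identifiers here are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_req {
	bool ready_to_put;	/* armed by whichever completion arrives first */
};

/* Return the request to the free pool; here we just reset the flag and log. */
static void req_put(struct toy_req *req)
{
	req->ready_to_put = false;
	printf("request recycled\n");
}

/* Shared gate: the first completion arms the flag, the second recycles the request. */
static void complete_one_side(struct toy_req *req)
{
	if (req->ready_to_put) {
		req_put(req);			/* the other completion already arrived */
	} else {
		req->ready_to_put = true;	/* wait for the other completion */
	}
}

static void on_send_complete(struct toy_req *req) { complete_one_side(req); }
static void on_recv_complete(struct toy_req *req) { complete_one_side(req); }

int main(void)
{
	struct toy_req req = { .ready_to_put = false };

	/* Completions may arrive in either order; recycling happens on the second one. */
	on_recv_complete(&req);	/* response arrives first: flag is armed */
	on_send_complete(&req);	/* send completion arrives second: request recycled */

	return 0;
}

A plain bool is enough here because both completions for a given request are assumed to be processed on one thread, which mirrors the patch: a qpair's send and recv completions are reaped by the same poller in nvme_rdma_qpair_process_completions.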