From 6b314fb5dc704b92248b86294079dd0f9cf2cc86 Mon Sep 17 00:00:00 2001 From: Seth Howell Date: Thu, 17 Oct 2019 13:05:26 -0700 Subject: [PATCH] nvme_rdma: properly separate alloc_reqs and register_reqs. The way these two functions were separated previously represented a pretty serious bug when doing a controller reset. If there were any outstanding requests in the rqpair, they would get overwritten during the call to nvme_rdma_qpair_register_reqs and the application would never get a completion for the higher level requests. The only thing that we need to do in this function is assign the proper lkeys. Change-Id: I304c70646daf9b563cd00badba7141e5e8653aad Signed-off-by: Seth Howell Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/471659 Tested-by: SPDK CI Jenkins Reviewed-by: Ben Walker Reviewed-by: Alexey Marchuk Reviewed-by: Jim Harris --- lib/nvme/nvme_rdma.c | 55 ++++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/lib/nvme/nvme_rdma.c b/lib/nvme/nvme_rdma.c index 1c2d2e3dc..54651463e 100644 --- a/lib/nvme/nvme_rdma.c +++ b/lib/nvme/nvme_rdma.c @@ -587,6 +587,8 @@ nvme_rdma_free_reqs(struct nvme_rdma_qpair *rqpair) static int nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair) { + int i; + rqpair->rdma_reqs = calloc(rqpair->num_entries, sizeof(struct spdk_nvme_rdma_req)); if (rqpair->rdma_reqs == NULL) { SPDK_ERRLOG("Failed to allocate rdma_reqs\n"); goto fail; } @@ -599,6 +601,33 @@ nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair) goto fail; } + + TAILQ_INIT(&rqpair->free_reqs); + TAILQ_INIT(&rqpair->outstanding_reqs); + for (i = 0; i < rqpair->num_entries; i++) { + struct spdk_nvme_rdma_req *rdma_req; + struct spdk_nvmf_cmd *cmd; + + rdma_req = &rqpair->rdma_reqs[i]; + cmd = &rqpair->cmds[i]; + + rdma_req->id = i; + + /* The first RDMA sgl element will always point + * at this data structure. Depending on whether + * an NVMe-oF SGL is required, the length of + * this element may change. 
*/ + rdma_req->send_sgl[0].addr = (uint64_t)cmd; + rdma_req->send_wr.wr_id = (uint64_t)rdma_req; + rdma_req->send_wr.next = NULL; + rdma_req->send_wr.opcode = IBV_WR_SEND; + rdma_req->send_wr.send_flags = IBV_SEND_SIGNALED; + rdma_req->send_wr.sg_list = rdma_req->send_sgl; + rdma_req->send_wr.imm_data = 0; + + TAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link); + } + return 0; fail: nvme_rdma_free_reqs(rqpair); @@ -617,32 +646,8 @@ nvme_rdma_register_reqs(struct nvme_rdma_qpair *rqpair) goto fail; } - TAILQ_INIT(&rqpair->free_reqs); - TAILQ_INIT(&rqpair->outstanding_reqs); for (i = 0; i < rqpair->num_entries; i++) { - struct spdk_nvme_rdma_req *rdma_req; - struct spdk_nvmf_cmd *cmd; - - rdma_req = &rqpair->rdma_reqs[i]; - cmd = &rqpair->cmds[i]; - - rdma_req->id = i; - - /* The first RDMA sgl element will always point - * at this data structure. Depending on whether - * an NVMe-oF SGL is required, the length of - * this element may change. */ - rdma_req->send_sgl[0].addr = (uint64_t)cmd; - rdma_req->send_sgl[0].lkey = rqpair->cmd_mr->lkey; - - rdma_req->send_wr.wr_id = (uint64_t)rdma_req; - rdma_req->send_wr.next = NULL; - rdma_req->send_wr.opcode = IBV_WR_SEND; - rdma_req->send_wr.send_flags = IBV_SEND_SIGNALED; - rdma_req->send_wr.sg_list = rdma_req->send_sgl; - rdma_req->send_wr.imm_data = 0; - - TAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link); + rqpair->rdma_reqs[i].send_sgl[0].lkey = rqpair->cmd_mr->lkey; } return 0;