nvme_rdma: properly separate alloc_reqs and register_reqs.
The way these two functions were separated previously represented a pretty serious bug when doing a controller reset. If there were any outstanding requests in the rqpair, they would get overwritten during the call to nvme_rdma_qpair_register_reqs and the application would never get a completion for the higher level requests. The only thing that we need to do in this function is assign the proper lkeys.

Change-Id: I304c70646daf9b563cd00badba7141e5e8653aad
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/471659
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 4c1a18c41d
commit 6b314fb5dc
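Why the old split lost requests: the register step re-ran TAILQ_INIT on both list heads and re-inserted every request into free_reqs, so any request still sitting on outstanding_reqs at reset time silently vanished and its completion was never delivered. The following minimal standalone sketch (not SPDK code; the struct and list names are hypothetical stand-ins) reproduces that list corruption with plain sys/queue.h:

/* Minimal sketch (hypothetical names, not SPDK code) of the reset bug:
 * re-running the init loop orphans requests on the outstanding list. */
#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	TAILQ_ENTRY(req) link;
};

TAILQ_HEAD(req_list, req);

int
main(void)
{
	struct req reqs[4];
	struct req_list free_reqs, outstanding_reqs;
	struct req *r;
	int i;

	/* Initial setup: every request starts on the free list. */
	TAILQ_INIT(&free_reqs);
	TAILQ_INIT(&outstanding_reqs);
	for (i = 0; i < 4; i++) {
		reqs[i].id = i;
		TAILQ_INSERT_TAIL(&free_reqs, &reqs[i], link);
	}

	/* Submit one request: it moves to the outstanding list. */
	r = TAILQ_FIRST(&free_reqs);
	TAILQ_REMOVE(&free_reqs, r, link);
	TAILQ_INSERT_TAIL(&outstanding_reqs, r, link);

	/* Controller reset with the old code: the register step re-ran the
	 * same init loop, blowing away both list heads and re-inserting
	 * every request as free -- including the one still outstanding. */
	TAILQ_INIT(&free_reqs);
	TAILQ_INIT(&outstanding_reqs);
	for (i = 0; i < 4; i++) {
		TAILQ_INSERT_TAIL(&free_reqs, &reqs[i], link);
	}

	/* The outstanding request is gone: no completion will ever be
	 * reported for it. */
	printf("outstanding empty after reset: %s\n",
	       TAILQ_EMPTY(&outstanding_reqs) ? "yes (request lost)" : "no");
	return 0;
}

The fix below moves the one-time setup (list heads, work requests) into nvme_rdma_alloc_reqs and leaves only the lkey assignment in the re-runnable register step.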
@@ -587,6 +587,8 @@ nvme_rdma_free_reqs(struct nvme_rdma_qpair *rqpair)
 static int
 nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
 {
+	int i;
+
 	rqpair->rdma_reqs = calloc(rqpair->num_entries, sizeof(struct spdk_nvme_rdma_req));
 	if (rqpair->rdma_reqs == NULL) {
 		SPDK_ERRLOG("Failed to allocate rdma_reqs\n");
@@ -599,6 +601,33 @@ nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
 		goto fail;
 	}
 
+	TAILQ_INIT(&rqpair->free_reqs);
+	TAILQ_INIT(&rqpair->outstanding_reqs);
+	for (i = 0; i < rqpair->num_entries; i++) {
+		struct spdk_nvme_rdma_req	*rdma_req;
+		struct spdk_nvmf_cmd		*cmd;
+
+		rdma_req = &rqpair->rdma_reqs[i];
+		cmd = &rqpair->cmds[i];
+
+		rdma_req->id = i;
+
+		/* The first RDMA sgl element will always point
+		 * at this data structure. Depending on whether
+		 * an NVMe-oF SGL is required, the length of
+		 * this element may change. */
+		rdma_req->send_sgl[0].addr = (uint64_t)cmd;
+		rdma_req->send_wr.wr_id = (uint64_t)rdma_req;
+		rdma_req->send_wr.next = NULL;
+		rdma_req->send_wr.opcode = IBV_WR_SEND;
+		rdma_req->send_wr.send_flags = IBV_SEND_SIGNALED;
+		rdma_req->send_wr.sg_list = rdma_req->send_sgl;
+		rdma_req->send_wr.imm_data = 0;
+
+		TAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link);
+	}
+
 	return 0;
 fail:
 	nvme_rdma_free_reqs(rqpair);
@@ -617,32 +646,8 @@ nvme_rdma_register_reqs(struct nvme_rdma_qpair *rqpair)
 		goto fail;
 	}
 
-	TAILQ_INIT(&rqpair->free_reqs);
-	TAILQ_INIT(&rqpair->outstanding_reqs);
 	for (i = 0; i < rqpair->num_entries; i++) {
-		struct spdk_nvme_rdma_req	*rdma_req;
-		struct spdk_nvmf_cmd		*cmd;
-
-		rdma_req = &rqpair->rdma_reqs[i];
-		cmd = &rqpair->cmds[i];
-
-		rdma_req->id = i;
-
-		/* The first RDMA sgl element will always point
-		 * at this data structure. Depending on whether
-		 * an NVMe-oF SGL is required, the length of
-		 * this element may change. */
-		rdma_req->send_sgl[0].addr = (uint64_t)cmd;
-		rdma_req->send_sgl[0].lkey = rqpair->cmd_mr->lkey;
-
-		rdma_req->send_wr.wr_id = (uint64_t)rdma_req;
-		rdma_req->send_wr.next = NULL;
-		rdma_req->send_wr.opcode = IBV_WR_SEND;
-		rdma_req->send_wr.send_flags = IBV_SEND_SIGNALED;
-		rdma_req->send_wr.sg_list = rdma_req->send_sgl;
-		rdma_req->send_wr.imm_data = 0;
-
-		TAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link);
+		rqpair->rdma_reqs[i].send_sgl[0].lkey = rqpair->cmd_mr->lkey;
 	}
 
 	return 0;
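With the loop reduced to the lkey assignment, the register step is safe to re-run after a reset tears down and re-creates the memory region. For context, a hedged sketch of where that lkey comes from in the raw libibverbs API (the helper name, struct, and parameters here are illustrative assumptions, not SPDK's actual wrappers):

/* Hedged sketch: the lkey stamped into each request's first SGL element
 * is taken from the ibv_mr returned by ibv_reg_mr(). Names below are
 * hypothetical; SPDK uses its own structures around this. */
#include <stddef.h>
#include <infiniband/verbs.h>

struct sketch_req {
	struct ibv_sge send_sgl[1];
};

static int
sketch_register_reqs(struct ibv_pd *pd, void *cmds, size_t cmds_len,
		     struct sketch_req *reqs, int num_entries)
{
	struct ibv_mr *mr;
	int i;

	/* Pin the command buffer; the resulting lkey is what authorizes
	 * the HCA to read it when posting sends. */
	mr = ibv_reg_mr(pd, cmds, cmds_len, IBV_ACCESS_LOCAL_WRITE);
	if (mr == NULL) {
		return -1;
	}

	/* Touch nothing else: list heads and work requests were set up
	 * once at allocation time and may hold live state on reset. */
	for (i = 0; i < num_entries; i++) {
		reqs[i].send_sgl[0].lkey = mr->lkey;
	}
	return 0;
}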