nvme/rdma: Dequeue request from outstanding list before calling completion
Each request has a callback context as its cb_arg, and the callback invoked via nvme_complete_request() for a completed request may reuse that context for a new request. On the other hand, the RDMA transport dequeues rdma_req from rqpair->outstanding_reqs only after calling nvme_complete_request() for the request pointed to by rdma_req. Hence, while nvme_complete_request() executes, rqpair->outstanding_reqs may hold two requests that share the same callback context: the completed request and the newly submitted one.

An upcoming patch will search for all requests whose cb_arg matches in order to abort them. In the case described above, that search could find two requests by mistake. To avoid this error, dequeue rdma_req from rqpair->outstanding_reqs before calling nvme_rdma_req_complete().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ia183733f4a4cd4f85de17514ef3a884693910a05
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2863
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Michael Haeuptle <michaelhaeuptle@gmail.com>
parent 299fce881a
commit a57aeac1fe
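To make the hazard concrete, here is a minimal, self-contained sketch (simplified types and a hypothetical count_matches() helper, not SPDK code) of the cb_arg scan the upcoming abort patch performs, showing how the old ordering can surface two entries for one context:

/* Minimal sketch, not SPDK code: simplified types and a hypothetical
 * count_matches() helper illustrating the double-match hazard. */
#include <sys/queue.h>
#include <stdio.h>

struct rdma_req {
	void *cb_arg;			/* caller's completion context */
	TAILQ_ENTRY(rdma_req) link;
};
TAILQ_HEAD(req_list, rdma_req);

/* The upcoming patch scans outstanding requests by cb_arg to abort them. */
static int
count_matches(struct req_list *outstanding, void *cb_arg)
{
	struct rdma_req *r;
	int n = 0;

	TAILQ_FOREACH(r, outstanding, link) {
		if (r->cb_arg == cb_arg) {
			n++;
		}
	}
	return n;
}

int
main(void)
{
	struct req_list outstanding;
	struct rdma_req completed = { .cb_arg = (void *)0x1 };
	struct rdma_req resubmitted = { .cb_arg = (void *)0x1 };

	TAILQ_INIT(&outstanding);

	/* Old ordering: the completed request is still on the list while its
	 * callback runs and submits a new request reusing the same cb_arg,
	 * so a scan sees two entries for one context. */
	TAILQ_INSERT_TAIL(&outstanding, &completed, link);
	TAILQ_INSERT_TAIL(&outstanding, &resubmitted, link);
	printf("matches: %d\n", count_matches(&outstanding, (void *)0x1)); /* 2 */

	/* New ordering: dequeue before completing, so at most one entry per
	 * context is ever visible. */
	TAILQ_REMOVE(&outstanding, &completed, link);
	printf("matches: %d\n", count_matches(&outstanding, (void *)0x1)); /* 1 */
	return 0;
}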
@@ -364,7 +364,6 @@ nvme_rdma_req_put(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
 {
 	rdma_req->completion_flags = 0;
 	rdma_req->req = NULL;
-	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 	TAILQ_INSERT_HEAD(&rqpair->free_reqs, rdma_req, link);
 }
 
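With the hunk above applied, nvme_rdma_req_put() is a pure free-list recycle. Reconstructed from the hunk, the function now reads as follows (the "static void" storage class and return type are assumed, since the hunk header shows only the parameter list):

/* Post-patch body reconstructed from the hunk above; "static void" is an
 * assumption, as the hunk header shows only the parameter list. */
static void
nvme_rdma_req_put(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
{
	rdma_req->completion_flags = 0;
	rdma_req->req = NULL;
	/* No TAILQ_REMOVE here any more: callers must dequeue the request
	 * from rqpair->outstanding_reqs before calling this. */
	TAILQ_INSERT_HEAD(&rqpair->free_reqs, rdma_req, link);
}

The dequeue from rqpair->outstanding_reqs thus becomes a caller-side obligation; the remaining hunks add it to each call site.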
@@ -2054,6 +2053,7 @@ nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
 
 	if (nvme_rdma_req_init(rqpair, req, rdma_req)) {
 		SPDK_ERRLOG("nvme_rdma_req_init() failed\n");
+		TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 		nvme_rdma_req_put(rqpair, rdma_req);
 		return -1;
 	}
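The explicit TAILQ_REMOVE is needed on this error path because the request is already on rqpair->outstanding_reqs by the time nvme_rdma_req_init() runs, and nvme_rdma_req_put() no longer dequeues it. A self-contained lifecycle sketch with simplified, hypothetical types (assuming the allocation helper moves the request onto the outstanding list, as the SPDK code appears to do):

/* Lifecycle sketch with simplified, hypothetical types, not SPDK code:
 * a request moves free_reqs -> outstanding_reqs on allocation, and the
 * error path must move it back explicitly before recycling it. */
#include <sys/queue.h>
#include <assert.h>
#include <stddef.h>

struct rdma_req {
	TAILQ_ENTRY(rdma_req) link;
};
TAILQ_HEAD(req_list, rdma_req);

struct qpair {
	struct req_list free_reqs;
	struct req_list outstanding_reqs;
};

static struct rdma_req *
req_get(struct qpair *q)
{
	struct rdma_req *r = TAILQ_FIRST(&q->free_reqs);

	if (r != NULL) {
		TAILQ_REMOVE(&q->free_reqs, r, link);
		/* Assumed to mirror SPDK: allocation places the request on
		 * the outstanding list. */
		TAILQ_INSERT_TAIL(&q->outstanding_reqs, r, link);
	}
	return r;
}

static void
req_put(struct qpair *q, struct rdma_req *r)
{
	/* Mirrors the patched nvme_rdma_req_put(): recycle only. */
	TAILQ_INSERT_HEAD(&q->free_reqs, r, link);
}

static int
submit(struct qpair *q, int init_fails)
{
	struct rdma_req *r = req_get(q);

	assert(r != NULL);
	if (init_fails) {
		/* As in the hunk: dequeue first, then recycle. */
		TAILQ_REMOVE(&q->outstanding_reqs, r, link);
		req_put(q, r);
		return -1;
	}
	return 0;
}

int
main(void)
{
	struct qpair q;
	struct rdma_req pool[2];

	TAILQ_INIT(&q.free_reqs);
	TAILQ_INIT(&q.outstanding_reqs);
	TAILQ_INSERT_HEAD(&q.free_reqs, &pool[0], link);
	TAILQ_INSERT_HEAD(&q.free_reqs, &pool[1], link);

	(void)submit(&q, 1);	/* error path */
	assert(TAILQ_EMPTY(&q.outstanding_reqs));
	return 0;
}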
@@ -2097,6 +2097,7 @@ nvme_rdma_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 		assert(rdma_req->req != NULL);
 		req = rdma_req->req;
 
+		TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 		nvme_rdma_req_complete(req, &cpl);
 		nvme_rdma_req_put(rqpair, rdma_req);
 	}
@@ -2144,6 +2145,7 @@ nvme_rdma_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
 static inline int
 nvme_rdma_request_ready(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
 {
+	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 	nvme_rdma_req_complete(rdma_req->req, &rqpair->rsps[rdma_req->rsp_idx].cpl);
 	nvme_rdma_req_put(rqpair, rdma_req);
 	return nvme_rdma_post_recv(rqpair, rdma_req->rsp_idx);
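After this hunk, the fast-path completion follows the same dequeue-complete-recycle order as the abort paths. Reconstructed from the hunk (the closing brace falls outside it), nvme_rdma_request_ready() now reads:

static inline int
nvme_rdma_request_ready(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
{
	/* Dequeue first, so a completion callback that resubmits with the
	 * same cb_arg never coexists with this entry on outstanding_reqs. */
	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
	nvme_rdma_req_complete(rdma_req->req, &rqpair->rsps[rdma_req->rsp_idx].cpl);
	nvme_rdma_req_put(rqpair, rdma_req);
	return nvme_rdma_post_recv(rqpair, rdma_req->rsp_idx);
}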
@@ -2398,12 +2400,14 @@ nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
 
 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
-		if (rdma_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
-			continue;
-		}
 		assert(rdma_req->req != NULL);
 		req = rdma_req->req;
 
+		if (req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
+			continue;
+		}
+
+		TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
 		nvme_rdma_req_complete(req, &cpl);
 		nvme_rdma_req_put(rqpair, rdma_req);
 	}
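Note that both abort loops can only remove entries mid-iteration because they use TAILQ_FOREACH_SAFE, which caches the next pointer before the loop body runs. A minimal standalone demonstration (not SPDK code; the fallback macro definition matches the BSD version, for sys/queue.h implementations such as glibc's that lack the _SAFE variants):

/* Standalone demonstration of removing entries mid-iteration, not SPDK
 * code. */
#include <sys/queue.h>
#include <stdio.h>

#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	     (var) != NULL && ((tvar) = TAILQ_NEXT((var), field), 1);	\
	     (var) = (tvar))
#endif

struct node {
	int keep;
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(node_list, node);

int
main(void)
{
	struct node_list list;
	struct node nodes[3] = { { .keep = 1 }, { .keep = 0 }, { .keep = 1 } };
	struct node *n, *tmp;
	int i;

	TAILQ_INIT(&list);
	for (i = 0; i < 3; i++) {
		TAILQ_INSERT_TAIL(&list, &nodes[i], link);
	}

	/* The next pointer is cached in tmp before the body runs, so the
	 * current node may be unlinked, as in the abort loops above. */
	TAILQ_FOREACH_SAFE(n, &list, link, tmp) {
		if (!n->keep) {
			TAILQ_REMOVE(&list, n, link);
		}
	}

	TAILQ_FOREACH(n, &list, link) {
		printf("keep=%d\n", n->keep);
	}
	return 0;
}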