nvmf/rdma: Abort request whose CID matches if it is pending

If the request is queued and is not in the completing state, we can
abort it safely.

If the state of the request is NEED_BUFFERING, the request is
queued to tqpair->group->group.pending_buf_queue.

If the state of the request is DATA_TRANSFER_TO_CONTROLLER_PENDING,
the request is queued to rqpair->pending_rdma_read_queue.

If the state of the request is DATA_TRANSFER_TO_HOST_PENDING,
the request is queued to rqpair->pending_rdma_write_queue.

According to the current state, dequeue from the corresponding
queue, and then call a new helper function
nvmf_rdma_request_set_abort_status().

Using a helper function makes the code easier to read.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Id0327f4d2c4728a11b3b6bbc7c2252f0b35263cf
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3012
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Michael Haeuptle <michaelhaeuptle@gmail.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Shuhei Matsumoto 2020-06-30 00:26:25 +09:00 committed by Tomasz Zawadzki
parent deec1fc790
commit c1305e71b6

View File

@ -4028,6 +4028,18 @@ spdk_nvmf_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
g_nvmf_hooks = *hooks;
}
/* Mark rdma_req_to_abort as aborted and record success in the Abort command.
 *
 * req is the Abort admin command itself; rdma_req_to_abort is the queued
 * request being cancelled. The aborted request's completion is filled with
 * ABORTED BY REQUEST and it is moved to READY_TO_COMPLETE so the normal
 * completion path sends it back to the host. Clearing bit 0 of the Abort
 * command's cdw0 reports that the target command was successfully aborted.
 */
static void
nvmf_rdma_request_set_abort_status(struct spdk_nvmf_request *req,
				   struct spdk_nvmf_rdma_request *rdma_req_to_abort)
{
	struct spdk_nvmf_request *abort_target = &rdma_req_to_abort->req;

	/* Complete the aborted request with a generic "aborted by request" status. */
	abort_target->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_target->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	rdma_req_to_abort->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;

	/* Command was successfully aborted. */
	req->rsp->nvme_cpl.cdw0 &= ~1U;
}
static void
nvmf_rdma_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
struct spdk_nvmf_request *req)
@ -4061,6 +4073,28 @@ nvmf_rdma_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
return;
}
break;
case RDMA_REQUEST_STATE_NEED_BUFFER:
STAILQ_REMOVE(&rqpair->poller->group->group.pending_buf_queue, &rdma_req_to_abort->req,
spdk_nvmf_request, buf_link);
nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
break;
case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req_to_abort, spdk_nvmf_rdma_request,
state_link);
nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
break;
case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req_to_abort, spdk_nvmf_rdma_request,
state_link);
nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
break;
default:
break;
}