nvmf/tcp: Wait until request is abortable if it is transferring

If a request is in the TRANSFERRING_HOST_TO_CONTROLLER state, it
cannot be aborted yet, but it may become abortable once it reaches
the EXECUTING state. Hence poll the request until it leaves the
transferring state and then retry the abort, giving up after a
fixed timeout.
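
The retry is driven by an SPDK poller plus an absolute tick deadline,
as the diff below shows. The following is a minimal sketch of that
pattern; the struct and helper names (abort_ctx, retry_abort,
start_abort and the callbacks) are placeholders for illustration,
while the spdk_poller/tick calls are the real APIs the patch uses.

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/thread.h"

struct abort_ctx {
	struct spdk_poller *poller;
	uint64_t timeout_tsc;				/* absolute deadline in ticks */
	bool (*is_abortable)(void *cb_arg);		/* placeholder predicate */
	void (*complete)(void *cb_arg, bool aborted);	/* placeholder completion */
	void *cb_arg;
};

static int
retry_abort(void *arg)
{
	struct abort_ctx *ctx = arg;

	spdk_poller_unregister(&ctx->poller);

	if (ctx->is_abortable(ctx->cb_arg)) {
		ctx->complete(ctx->cb_arg, true);
	} else if (spdk_get_ticks() < ctx->timeout_tsc) {
		/* Not abortable yet: check again on the next poll. */
		ctx->poller = SPDK_POLLER_REGISTER(retry_abort, ctx, 0);
	} else {
		/* Deadline passed: report that the command was not aborted. */
		ctx->complete(ctx->cb_arg, false);
	}

	return SPDK_POLLER_BUSY;
}

static void
start_abort(struct abort_ctx *ctx, uint64_t timeout_sec)
{
	ctx->timeout_tsc = spdk_get_ticks() + timeout_sec * spdk_get_ticks_hz();
	ctx->poller = NULL;
	retry_abort(ctx);
}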

A follow-up patch will make the timeout value configurable as a new
transport option.
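
As a hedged sketch of what that option could look like (not part of
this commit), the fixed define would be replaced by a per-transport
value; the function and field names below (nvmf_tcp_abort_set_deadline,
abort_timeout_sec) are assumptions, not taken from this patch.

/* Hypothetical sketch only: the option name abort_timeout_sec and its
 * placement in spdk_nvmf_transport_opts are assumptions, not taken
 * from this commit. */
static void
nvmf_tcp_abort_set_deadline(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_transport *transport = req->qpair->transport;

	req->timeout_tsc = spdk_get_ticks() +
			   transport->opts.abort_timeout_sec * spdk_get_ticks_hz();
}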

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I98347b68e8b6b4a804c47894964cb81eae215aaa
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3010
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Michael Haeuptle <michaelhaeuptle@gmail.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Shuhei Matsumoto 2020-07-09 09:44:37 +09:00 committed by Tomasz Zawadzki
parent 0190e71eb6
commit 040ee27c16

@@ -2473,37 +2473,25 @@ nvmf_tcp_req_set_abort_status(struct spdk_nvmf_request *req,
 	req->rsp->nvme_cpl.cdw0 &= ~1U;	/* Command was successfully aborted. */
 }
 
-static void
-nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
-			     struct spdk_nvmf_request *req)
-{
-	struct spdk_nvmf_tcp_qpair *tqpair;
-	uint16_t cid;
-	uint32_t i;
-	struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL;
-	int rc;
-
-	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
-
-	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
-
-	for (i = 0; i < tqpair->resource_count; i++) {
-		tcp_req_to_abort = &tqpair->reqs[i];
-
-		if (tcp_req_to_abort->state != TCP_REQUEST_STATE_FREE &&
-		    tcp_req_to_abort->req.cmd->nvme_cmd.cid == cid) {
-			break;
-		}
-	}
-
-	if (tcp_req_to_abort == NULL) {
-		goto complete;
-	}
+#define NVMF_TCP_ABORT_TIMEOUT_SEC	1
+
+static int
+_nvmf_tcp_qpair_abort_request(void *ctx)
+{
+	struct spdk_nvmf_request *req = ctx;
+	struct spdk_nvmf_tcp_req *tcp_req_to_abort = SPDK_CONTAINEROF(req->req_to_abort,
+			struct spdk_nvmf_tcp_req, req);
+	struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair,
+					     struct spdk_nvmf_tcp_qpair, qpair);
+	int rc;
+
+	spdk_poller_unregister(&req->poller);
 
 	switch (tcp_req_to_abort->state) {
 	case TCP_REQUEST_STATE_EXECUTING:
 		rc = nvmf_ctrlr_abort_request(req, &tcp_req_to_abort->req);
 		if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
-			return;
+			return SPDK_POLLER_BUSY;
 		}
 		break;
@@ -2518,12 +2506,52 @@ nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
 		nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort);
 		break;
 
+	case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
+		if (spdk_get_ticks() < req->timeout_tsc) {
+			req->poller = SPDK_POLLER_REGISTER(_nvmf_tcp_qpair_abort_request, req, 0);
+			return SPDK_POLLER_BUSY;
+		}
+		break;
+
 	default:
 		break;
 	}
 
-complete:
 	spdk_nvmf_request_complete(req);
+
+	return SPDK_POLLER_BUSY;
+}
+
+static void
+nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
+			     struct spdk_nvmf_request *req)
+{
+	struct spdk_nvmf_tcp_qpair *tqpair;
+	uint16_t cid;
+	uint32_t i;
+	struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL;
+
+	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
+
+	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
+
+	for (i = 0; i < tqpair->resource_count; i++) {
+		tcp_req_to_abort = &tqpair->reqs[i];
+
+		if (tcp_req_to_abort->state != TCP_REQUEST_STATE_FREE &&
+		    tcp_req_to_abort->req.cmd->nvme_cmd.cid == cid) {
+			break;
+		}
+	}
+
+	if (tcp_req_to_abort == NULL) {
+		spdk_nvmf_request_complete(req);
+		return;
+	}
+
+	req->req_to_abort = &tcp_req_to_abort->req;
+	req->timeout_tsc = spdk_get_ticks() + NVMF_TCP_ABORT_TIMEOUT_SEC * spdk_get_ticks_hz();
+	req->poller = NULL;
+
+	_nvmf_tcp_qpair_abort_request(req);
 }
 
 #define SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH 128