lib/nvme: Abort queued requests whose cb_arg matches

Use another list dedicated to hold queued requests being aborted
to avoid potential infinite recursive calls.

Add a helper function nvme_qpair_abort_queued_reqs() to move requests
whose cb_arg matches from qpair->queued_req to qpair->aborting_queued_req.
Then nvme_qpair_resubmit_requests() aborts all requests in
qpair->aborting_queued_req.

The first idea was that nvme_qpair_abort_queued_reqs() would abort queued
requests directly. However, this caused infinite recursive calls.
Hence separate requesting abort to queued requests and actually
aborting queued requests.

The detail of the infinite recursive calls is as follows:

Some SPDK tool submits the next request from the callback to the completion
of a request in the completion polling loop. For such tool, if the callback
submits a request and then aborts the request immediately, and the request
could not be submitted but queued, it will create infinite recursive calls
by request submit and abort, and it will not be able to get out of
completion polling loop.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I8196182b981bc52dee2074d7642498a5d6ef97d4
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2891
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Shuhei Matsumoto 2020-06-23 05:25:02 +09:00 committed by Tomasz Zawadzki
parent e137881e4e
commit 576a373d58
2 changed files with 45 additions and 0 deletions

View File

@ -406,6 +406,7 @@ struct spdk_nvme_qpair {
STAILQ_HEAD(, nvme_request) free_req; STAILQ_HEAD(, nvme_request) free_req;
STAILQ_HEAD(, nvme_request) queued_req; STAILQ_HEAD(, nvme_request) queued_req;
STAILQ_HEAD(, nvme_request) aborting_queued_req;
/* List entry for spdk_nvme_transport_poll_group::qpairs */ /* List entry for spdk_nvme_transport_poll_group::qpairs */
STAILQ_ENTRY(spdk_nvme_qpair) poll_group_stailq; STAILQ_ENTRY(spdk_nvme_qpair) poll_group_stailq;
@ -933,6 +934,7 @@ void nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
struct nvme_request *req); struct nvme_request *req);
void nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr); void nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
uint32_t nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
void nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests); void nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);
int nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr); int nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);

View File

@ -557,6 +557,44 @@ _nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
} }
} }
/* The callback to a request may submit the next request which is queued and
* then the same callback may abort it immediately. This repetition may cause
* infinite recursive calls. Hence move aborting requests to another list here
* and abort them later at resubmission.
*/
static void
_nvme_qpair_complete_abort_queued_reqs(struct spdk_nvme_qpair *qpair)
{
struct nvme_request *req;
while (!STAILQ_EMPTY(&qpair->aborting_queued_req)) {
req = STAILQ_FIRST(&qpair->aborting_queued_req);
STAILQ_REMOVE_HEAD(&qpair->aborting_queued_req, stailq);
nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC,
SPDK_NVME_SC_ABORTED_BY_REQUEST, 1, true);
}
}
/* Move every queued request whose cb_arg matches cmd_cb_arg onto the
 * aborting_queued_req list; the actual completion happens later in
 * _nvme_qpair_complete_abort_queued_reqs(). Returns the number of
 * requests marked for abort.
 */
uint32_t
nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg)
{
	struct nvme_request *cur, *next;
	uint32_t count = 0;

	STAILQ_FOREACH_SAFE(cur, &qpair->queued_req, stailq, next) {
		if (cur->cb_arg != cmd_cb_arg) {
			continue;
		}

		STAILQ_REMOVE(&qpair->queued_req, cur, nvme_request, stailq);
		STAILQ_INSERT_TAIL(&qpair->aborting_queued_req, cur, stailq);
		if (!qpair->ctrlr->opts.disable_error_logging) {
			SPDK_ERRLOG("aborting queued i/o\n");
		}
		count++;
	}

	return count;
}
static inline bool static inline bool
nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair) nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
{ {
@ -629,6 +667,8 @@ nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_request
break; break;
} }
} }
_nvme_qpair_complete_abort_queued_reqs(qpair);
} }
int32_t int32_t
@ -720,6 +760,7 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
STAILQ_INIT(&qpair->free_req); STAILQ_INIT(&qpair->free_req);
STAILQ_INIT(&qpair->queued_req); STAILQ_INIT(&qpair->queued_req);
STAILQ_INIT(&qpair->aborting_queued_req);
TAILQ_INIT(&qpair->err_cmd_head); TAILQ_INIT(&qpair->err_cmd_head);
STAILQ_INIT(&qpair->err_req_head); STAILQ_INIT(&qpair->err_req_head);
@ -763,6 +804,7 @@ nvme_qpair_deinit(struct spdk_nvme_qpair *qpair)
struct nvme_error_cmd *cmd, *entry; struct nvme_error_cmd *cmd, *entry;
_nvme_qpair_abort_queued_reqs(qpair, 1); _nvme_qpair_abort_queued_reqs(qpair, 1);
_nvme_qpair_complete_abort_queued_reqs(qpair);
nvme_qpair_complete_error_reqs(qpair); nvme_qpair_complete_error_reqs(qpair);
TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) { TAILQ_FOREACH_SAFE(cmd, &qpair->err_cmd_head, link, entry) {
@ -956,6 +998,7 @@ nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{ {
nvme_qpair_complete_error_reqs(qpair); nvme_qpair_complete_error_reqs(qpair);
_nvme_qpair_abort_queued_reqs(qpair, dnr); _nvme_qpair_abort_queued_reqs(qpair, dnr);
_nvme_qpair_complete_abort_queued_reqs(qpair);
nvme_transport_qpair_abort_reqs(qpair, dnr); nvme_transport_qpair_abort_reqs(qpair, dnr);
} }