lib/nvme: abort all requests when disconnecting a qpair.
By aborting all requests from every qpair when it is disconnected, we can completely avoid having to abort requests when we enable the qpair, since nothing will be left enabled.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Iba3bd866405dd182b72285def0843c9809f6500e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1788
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent: b2a93a320d
commit: 6189c0ceb7
@ -1164,8 +1164,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
|
||||
TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
|
||||
qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
|
||||
}
|
||||
nvme_qpair_complete_error_reqs(ctrlr->adminq);
|
||||
nvme_transport_qpair_abort_reqs(ctrlr->adminq, 0 /* retry */);
|
||||
|
||||
ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
|
||||
nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
|
||||
if (nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq) != 0) {
|
||||
|
@ -417,8 +417,14 @@ nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
|
||||
*/
|
||||
if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTED && !qpair->ctrlr->is_resetting) {
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLING);
|
||||
nvme_qpair_complete_error_reqs(qpair);
|
||||
nvme_transport_qpair_abort_reqs(qpair, 0 /* retry */);
|
||||
/*
|
||||
* PCIe is special, for fabrics transports, we can abort requests before disconnect during reset
|
||||
* but we have historically not disconnected pcie qpairs during reset so we have to abort requests
|
||||
* here.
|
||||
*/
|
||||
if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
|
||||
nvme_qpair_abort_reqs(qpair, 0);
|
||||
}
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLED);
|
||||
while (!STAILQ_EMPTY(&qpair->queued_req)) {
|
||||
req = STAILQ_FIRST(&qpair->queued_req);
|
||||
@ -776,6 +782,7 @@ nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *
|
||||
/*
 * Abort every outstanding request associated with a qpair, in three stages:
 * first complete requests already placed on the qpair's error list, then
 * abort requests still sitting in the software queued_req queue, and finally
 * abort requests that have been handed to the transport layer.
 *
 * \param qpair the queue pair whose requests are aborted.
 * \param dnr   value used for the NVMe "do not retry" status field on the
 *              aborted completions; callers in this change pass 0 so the
 *              commands may be retried (see the "0 / * retry * /" call sites
 *              in the reset and enable paths).
 *
 * NOTE(review): the relative order of the three calls looks intentional
 * (drain error list before queued before transport-owned) — confirm before
 * reordering.
 */
void
nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	nvme_qpair_complete_error_reqs(qpair);
	nvme_qpair_abort_queued_reqs(qpair, dnr);
	nvme_transport_qpair_abort_reqs(qpair, dnr);
}
|
||||
|
@ -2086,7 +2086,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
|
||||
fail:
|
||||
/*
|
||||
* Since admin queues take the ctrlr_lock before entering this function,
|
||||
* we can call nvme_rdma_ctrlr_disconnect_qpair. For other qpairs we need
|
||||
* we can call nvme_transport_ctrlr_disconnect_qpair. For other qpairs we need
|
||||
* to call the generic function which will take the lock for us.
|
||||
*/
|
||||
if (rc == IBV_WC_RETRY_EXC_ERR) {
|
||||
|
@ -1426,7 +1426,7 @@ fail:
|
||||
|
||||
/*
|
||||
* Since admin queues take the ctrlr_lock before entering this function,
|
||||
* we can call nvme_tcp_ctrlr_disconnect_qpair. For other qpairs we need
|
||||
* we can call nvme_transport_ctrlr_disconnect_qpair. For other qpairs we need
|
||||
* to call the generic function which will take the lock for us.
|
||||
*/
|
||||
qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
|
||||
|
@ -330,7 +330,10 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
|
||||
if (qpair->poll_group) {
|
||||
nvme_poll_group_deactivate_qpair(qpair);
|
||||
}
|
||||
|
||||
transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
|
||||
|
||||
nvme_qpair_abort_reqs(qpair, 0);
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user