nvme: Factor out operations done after disconnect qpair completes

This is preparation for making nvme_transport_ctrlr_disconnect_qpair()
asynchronous.

In nvme_transport_ctrlr_disconnect_qpair(), factor out the operations performed
after the transport-specific ctrlr_disconnect_qpair() returns into a helper
function, nvme_transport_ctrlr_disconnect_qpair_done().
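
For reference, the helper simply gathers the cleanup that used to run inline after
the transport call (a sketch; the closing brace is cut off in the nvme_transport.c
hunk further down):

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	/* Abort anything still queued at the generic and transport layers,
	 * then mark the qpair as disconnected.
	 */
	nvme_qpair_abort_all_queued_reqs(qpair, 0);
	nvme_transport_qpair_abort_reqs(qpair, 0);
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}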

Then move the call to nvme_transport_ctrlr_disconnect_qpair_done() to the end
of each transport-specific ctrlr_disconnect_qpair().
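
Each transport now invokes the helper itself; the PCIe case (sketched from the
hunk below) reduces to the trivial form:

void
nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	/* PCIe has nothing asynchronous to wait for, so the disconnect completes right away. */
	nvme_transport_ctrlr_disconnect_qpair_done(qpair);
}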

Additionally, remove the code that overwrites the qpair state to DISCONNECTED
from nvme_transport_connect_qpair_fail(), because it is duplicated:
nvme_transport_ctrlr_disconnect_qpair() is responsible for putting the qpair
into the disconnected state, even once it completes asynchronously.
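
The resulting failure path then looks roughly as follows (lines outside the hunk,
such as the ctrlr lookup and the static qualifier, are assumptions):

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;	/* assumed; not shown in the hunk */

	/* If the qpair was unable to reconnect, restore the original failure reason. */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;

	/* No explicit state change here anymore; nvme_transport_ctrlr_disconnect_qpair_done()
	 * sets NVME_QPAIR_DISCONNECTED, even when the disconnect completes asynchronously.
	 */
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}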

Change-Id: I9c8faa7039d306d3e31a8f51826755ce8840a8aa
Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10851
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Authored by Shuhei Matsumoto on 2021-12-24 20:48:42 +09:00; committed by Tomasz Zawadzki
parent 0b32309bf6
commit cfe11bd1db
7 changed files with 16 additions and 2 deletions


@@ -3,7 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -1515,6 +1515,7 @@ int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair);
void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair);
void nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair);
int nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
struct spdk_memory_domain **domains, int array_size);
void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);


@@ -602,6 +602,7 @@ nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
void
nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
nvme_transport_ctrlr_disconnect_qpair_done(qpair);
}
/* Used when dst points to MMIO (i.e. CMB) in a virtual machine - in these cases we must


@@ -1849,6 +1849,8 @@ nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
ibv_destroy_cq(rqpair->cq);
rqpair->cq = NULL;
}
nvme_transport_ctrlr_disconnect_qpair_done(qpair);
}
static void nvme_rdma_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);


@@ -344,6 +344,8 @@ nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
*/
TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
}
nvme_transport_ctrlr_disconnect_qpair_done(qpair);
}
static void nvme_tcp_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);


@@ -465,7 +465,6 @@ nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
/* If the qpair was unable to reconnect, restore the original failure reason */
qpair->transport_failure_reason = qpair->last_transport_failure_reason;
nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}
int
@@ -535,7 +534,11 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
}
transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}
void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
nvme_qpair_abort_all_queued_reqs(qpair, 0);
nvme_transport_qpair_abort_reqs(qpair, 0);
nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -81,6 +82,7 @@ DEFINE_STUB_V(nvme_qpair_deinit, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(spdk_nvme_transport_register, (const struct spdk_nvme_transport_ops *ops));
DEFINE_STUB(nvme_transport_ctrlr_connect_qpair, int, (struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair_done, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
(struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
DEFINE_STUB(nvme_ctrlr_add_process, int, (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0);


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -74,6 +75,8 @@ DEFINE_STUB(nvme_request_check_timeout, int, (struct nvme_request *req, uint16_t
struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair_done, (struct spdk_nvme_qpair *qpair));
int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio,