nvme: Separate spdk_nvme_ctrlr_disconnect() into disconnect and after it

Separate spdk_nvme_ctrlr_disconnect() into nvme_ctrlr_disconnect()
and nvme_ctrlr_disconnect_done() to call nvme_ctrlr_disconnect_done()
after adminq is actually disconnected when disconnecting adminq
asynchronously.

The following patches will add a new flag is_disconnecting to struct
spdk_nvme_ctrlr and prevent us from setting ctrlr->is_failed to true
between nvme_ctrlr_disconnect() and nvme_ctrlr_disconnect_done().

With this patch, nvme_ctrlr_disconnect() and nvme_ctrlr_disconnect_done()
are executed in the same context, so it is not possible to set
ctrlr->is_failed to true between nvme_ctrlr_disconnect() and
nvme_ctrlr_disconnect_done().

Hence nvme_ctrlr_disconnect_done() does not have to clear ctrlr->is_failed
again.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I18b5b68f37e27b54782691823edae9738c26faa1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10999
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Shuhei Matsumoto 2022-01-08 08:21:48 +09:00 committed by Tomasz Zawadzki
parent 4a73675dbf
commit 21322e01dd

View File

@ -1622,12 +1622,11 @@ nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
}
}
int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
static int
nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
struct spdk_nvme_qpair *qpair;
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
ctrlr->prepare_for_reset = false;
if (ctrlr->is_resetting || ctrlr->is_removed) {
@ -1636,7 +1635,6 @@ spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
* immediately since there is no need to kick off another
* reset in these cases.
*/
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
return ctrlr->is_resetting ? -EBUSY : -ENXIO;
}
@ -1661,6 +1659,12 @@ spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
return 0;
}
static void
nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
{
/* Doorbell buffer config is invalid during reset */
nvme_ctrlr_free_doorbell_buffer(ctrlr);
@ -1668,9 +1672,22 @@ spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
nvme_ctrlr_free_iocs_specific_data(ctrlr);
spdk_bit_array_free(&ctrlr->free_io_qids);
}
int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
rc = nvme_ctrlr_disconnect(ctrlr);
if (rc == 0) {
nvme_ctrlr_disconnect_done(ctrlr);
}
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
void
@ -1769,7 +1786,15 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
rc = spdk_nvme_ctrlr_disconnect(ctrlr);
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
rc = nvme_ctrlr_disconnect(ctrlr);
if (rc == 0) {
nvme_ctrlr_disconnect_done(ctrlr);
}
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
if (rc != 0) {
if (rc == -EBUSY) {
rc = 0;