From 3911922005614e85b89353c3392b227b015d8e05 Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Mon, 25 Nov 2019 15:33:21 -0700
Subject: [PATCH] nvme: remove redundant transport_qp_is_failed checks

The qpair flag transport_qp_is_failed duplicates information already
tracked by the qpair state machine: a failed qpair is in the
NVME_QPAIR_DISABLED state rather than NVME_QPAIR_CONNECTED. There are a
couple of places where we check transport_qp_is_failed and then
immediately check whether we are in the connected state. Whether we are
failed or simply not connected, we return the same value to the calling
function. Since the transport_qp_is_failed checks are not necessary,
they can be removed. As a result, there is no need to keep track of the
flag, and it can be removed from the qpair structure.

Change-Id: I4aef5d20eb267bfd6118e5d1d088df05574d9ffd
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/475802
Tested-by: SPDK CI Jenkins
Reviewed-by: Jim Harris
Reviewed-by: Shuhei Matsumoto
---
 lib/nvme/nvme_ctrlr.c                           |  6 +-----
 lib/nvme/nvme_internal.h                        |  2 --
 lib/nvme/nvme_qpair.c                           |  8 --------
 lib/nvme/nvme_rdma.c                            | 11 +++++------
 lib/nvme/nvme_tcp.c                             |  5 ++---
 test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c |  4 ++--
 test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c |  4 ++--
 7 files changed, 12 insertions(+), 28 deletions(-)

diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c
index 8d3e35237..4cac0e0dc 100644
--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -408,7 +408,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
 		goto out;
 	}
 
-	if (!qpair->transport_qp_is_failed) {
+	if (!nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED)) {
 		rc = 0;
 		goto out;
 	}
@@ -419,12 +419,10 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
 	if (rc) {
 		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
-		qpair->transport_qp_is_failed = true;
 		rc = -EAGAIN;
 		goto out;
 	}
 	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
-	qpair->transport_qp_is_failed = false;
 
 out:
 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -1079,7 +1077,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 	/* Disable all queues before disabling the controller hardware. */
 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
 		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
-		qpair->transport_qp_is_failed = true;
 	}
 	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
 	nvme_qpair_complete_error_reqs(ctrlr->adminq);
@@ -1124,7 +1121,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 				continue;
 			}
 			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
-			qpair->transport_qp_is_failed = false;
 		}
 	}
 
diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index 83a5e6244..f71510f3a 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -356,8 +356,6 @@ struct spdk_nvme_qpair {
 	uint8_t in_completion_context : 1;
 	uint8_t delete_after_completion_context: 1;
 
-	uint8_t transport_qp_is_failed: 1;
-
 	/*
 	 * Set when no deletion notification is needed. For example, the process
 	 * which allocated this qpair exited unexpectedly.
diff --git a/lib/nvme/nvme_qpair.c b/lib/nvme/nvme_qpair.c
index ec743b78a..ff268b11e 100644
--- a/lib/nvme/nvme_qpair.c
+++ b/lib/nvme/nvme_qpair.c
@@ -448,10 +448,6 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		return -ENXIO;
 	}
 
-	if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
-		return -ENXIO;
-	}
-
 	if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) &&
 			  !nvme_qpair_state_equals(qpair, NVME_QPAIR_CONNECTING))) {
 		/*
@@ -708,10 +704,6 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
 		return 0;
 	}
 
-	if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
-		return -ENXIO;
-	}
-
 	rc = _nvme_qpair_submit_request(qpair, req);
 	if (rc == -EAGAIN) {
 		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
diff --git a/lib/nvme/nvme_rdma.c b/lib/nvme/nvme_rdma.c
index f64f384c5..11b6b4994 100644
--- a/lib/nvme/nvme_rdma.c
+++ b/lib/nvme/nvme_rdma.c
@@ -288,13 +288,13 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		rqpair->qpair.transport_qp_is_failed = true;
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
 		break;
 	case RDMA_CM_EVENT_MULTICAST_JOIN:
 	case RDMA_CM_EVENT_MULTICAST_ERROR:
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
-		rqpair->qpair.transport_qp_is_failed = true;
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
 		break;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		break;
@@ -1058,10 +1058,9 @@ nvme_rdma_qpair_connect(struct nvme_rdma_qpair *rqpair)
 		return -1;
 	}
 
-	rqpair->qpair.transport_qp_is_failed = false;
 	rc = nvme_fabric_qpair_connect(&rqpair->qpair, rqpair->num_entries);
 	if (rc < 0) {
-		rqpair->qpair.transport_qp_is_failed = true;
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
 		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
 		return -1;
 	}
@@ -1514,7 +1513,7 @@ nvme_rdma_qpair_disconnect(struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
 
-	qpair->transport_qp_is_failed = true;
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
 	nvme_rdma_unregister_mem(rqpair);
 	nvme_rdma_unregister_reqs(rqpair);
 	nvme_rdma_unregister_rsps(rqpair);
@@ -1895,7 +1894,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 	}
 
 	nvme_rdma_qpair_process_cm_event(rqpair);
-	if (spdk_unlikely(qpair->transport_qp_is_failed)) {
+	if (spdk_unlikely(nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED))) {
 		goto fail;
 	}
 
diff --git a/lib/nvme/nvme_tcp.c b/lib/nvme/nvme_tcp.c
index c9fe52a2e..207451b96 100644
--- a/lib/nvme/nvme_tcp.c
+++ b/lib/nvme/nvme_tcp.c
@@ -235,7 +235,7 @@ nvme_tcp_qpair_disconnect(struct spdk_nvme_qpair *qpair)
 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
 	struct nvme_tcp_pdu *pdu;
 
-	qpair->transport_qp_is_failed = true;
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
 	spdk_sock_close(&tqpair->sock);
 
 	/* clear the send_queue */
@@ -1624,10 +1624,9 @@ nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
 		return -1;
 	}
 
-	tqpair->qpair.transport_qp_is_failed = false;
 	rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
 	if (rc < 0) {
-		tqpair->qpair.transport_qp_is_failed = true;
+		nvme_qpair_set_state(&tqpair->qpair, NVME_QPAIR_DISABLED);
 		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
 		return -1;
 	}
diff --git a/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
index 3f7a9df41..53446c32f 100644
--- a/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
+++ b/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -1468,14 +1468,14 @@ test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
 
 	/* qpair not failed. Make sure we don't call down to the transport */
 	ctrlr.is_failed = 0;
-	qpair.transport_qp_is_failed = false;
+	qpair.state = NVME_QPAIR_CONNECTED;
 	g_connect_qpair_called = false;
 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
 	CU_ASSERT(g_connect_qpair_called == false);
 	CU_ASSERT(rc == 0)
 
 	/* transport qpair is failed. make sure we call down to the transport */
-	qpair.transport_qp_is_failed = true;
+	qpair.state = NVME_QPAIR_DISABLED;
 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
 	CU_ASSERT(g_connect_qpair_called == true);
 	CU_ASSERT(rc == 0)
diff --git a/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
index e1be64a7c..c52f55038 100644
--- a/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
+++ b/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -218,7 +218,7 @@ static void test_nvme_qpair_process_completions(void)
 	/* Same if the qpair is failed at the transport layer. */
 	ctrlr.is_failed = false;
 	ctrlr.is_removed = false;
-	qpair.transport_qp_is_failed = true;
+	qpair.state = NVME_QPAIR_DISABLED;
 	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
 	CU_ASSERT(rc == -ENXIO);
 	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
@@ -228,7 +228,7 @@ static void test_nvme_qpair_process_completions(void)
 	/* If the controller is removed, make sure we abort the requests. */
 	ctrlr.is_failed = true;
 	ctrlr.is_removed = true;
-	qpair.transport_qp_is_failed = false;
+	qpair.state = NVME_QPAIR_CONNECTED;
 	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
 	CU_ASSERT(rc == -ENXIO);
 	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
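
The stand-alone sketch below illustrates the redundancy described in the commit
message. The enum, struct, and function names are simplified stand-ins invented
for illustration only, not SPDK's real spdk_nvme_qpair definitions; the actual
checks removed by this patch live in spdk_nvme_qpair_process_completions() and
nvme_qpair_submit_request() in lib/nvme/nvme_qpair.c.

/*
 * Minimal sketch (not SPDK code): a toy qpair keeps a transport_qp_is_failed
 * flag in lockstep with a DISABLED state, so dropping the flag check does not
 * change the return value of the completion path.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum qpair_state {
	QPAIR_DISABLED,		/* stand-in for NVME_QPAIR_DISABLED */
	QPAIR_CONNECTING,	/* stand-in for NVME_QPAIR_CONNECTING */
	QPAIR_CONNECTED,	/* stand-in for NVME_QPAIR_CONNECTED */
};

struct toy_qpair {
	enum qpair_state state;
	bool transport_qp_is_failed;	/* true exactly when state == QPAIR_DISABLED */
};

/* Before the patch: flag check followed by an equivalent state check. */
static int
process_completions_before(const struct toy_qpair *q)
{
	if (q->transport_qp_is_failed) {
		return -1;	/* -ENXIO in the real code */
	}
	if (q->state != QPAIR_CONNECTED) {
		return -1;
	}
	return 0;
}

/* After the patch: the state machine alone decides. */
static int
process_completions_after(const struct toy_qpair *q)
{
	if (q->state != QPAIR_CONNECTED) {
		return -1;
	}
	return 0;
}

int
main(void)
{
	const struct toy_qpair qpairs[] = {
		{ .state = QPAIR_DISABLED,   .transport_qp_is_failed = true  },
		{ .state = QPAIR_CONNECTING, .transport_qp_is_failed = false },
		{ .state = QPAIR_CONNECTED,  .transport_qp_is_failed = false },
	};

	for (size_t i = 0; i < sizeof(qpairs) / sizeof(qpairs[0]); i++) {
		/* Both versions agree for every state the flag can accompany. */
		assert(process_completions_before(&qpairs[i]) ==
		       process_completions_after(&qpairs[i]));
	}
	printf("transport_qp_is_failed check is redundant in this model\n");
	return 0;
}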