lib/nvme: rename NVME_QPAIR_DISABLED
This state really indicates that a qpair is no longer connected, so NVME_QPAIR_DISCONNECTED is a much more accurate name.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Ia480d94f795bb0d8f5b4eff9f2857d6fe8ea1b34
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1850
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit 9649ee09fa (parent 296303037d)
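For context before the diff, here is a minimal sketch of what a call site reads like after the rename. The state value and nvme_qpair_get_state() come from the hunks below; the included header and the wrapper function are assumptions made purely for illustration and are not part of this change.

#include <stdbool.h>

#include "nvme_internal.h"	/* assumed internal header declaring nvme_qpair_get_state() */

/* Hypothetical helper: after the rename the first qpair state names the
 * actual condition (no transport connection) rather than "disabled",
 * so a check like this reads naturally. */
static bool
nvme_qpair_needs_reconnect(struct spdk_nvme_qpair *qpair)
{
	return nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED;
}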
@@ -360,7 +360,7 @@ spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 {
 	int rc;
 
-	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISABLED) {
+	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
 		return -EISCONN;
 	}
 
@@ -467,7 +467,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
 		goto out;
 	}
 
-	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISABLED) {
+	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
 		rc = 0;
 		goto out;
 	}
@@ -1165,9 +1165,9 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 	/* Disable all queues before disabling the controller hardware. */
 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
+		nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	}
-	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
+	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISCONNECTED);
 	nvme_qpair_complete_error_reqs(ctrlr->adminq);
 	nvme_transport_qpair_abort_reqs(ctrlr->adminq, 0 /* retry */);
 	ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
@@ -356,7 +356,7 @@ struct nvme_async_event_request {
 };
 
 enum nvme_qpair_state {
-	NVME_QPAIR_DISABLED,
+	NVME_QPAIR_DISCONNECTED,
 	NVME_QPAIR_CONNECTING,
 	NVME_QPAIR_CONNECTED,
 	NVME_QPAIR_ENABLING,
@@ -56,7 +56,7 @@ spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qp
 	struct spdk_nvme_transport_poll_group *tgroup;
 	const struct spdk_nvme_transport *transport;
 
-	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISABLED) {
+	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
 		return -EINVAL;
 	}
 
@@ -330,18 +330,18 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_REMOTE;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_MULTICAST_JOIN:
 	case RDMA_CM_EVENT_MULTICAST_ERROR:
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
+		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		break;
@@ -1631,7 +1631,7 @@ nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 	struct nvme_rdma_ctrlr *rctrlr;
 	struct nvme_rdma_cm_event_entry *entry, *tmp;
 
-	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	nvme_rdma_unregister_mem(rqpair);
 	nvme_rdma_unregister_reqs(rqpair);
 	nvme_rdma_unregister_rsps(rqpair);
@@ -2007,7 +2007,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 	}
 	nvme_rdma_qpair_process_cm_event(rqpair);
 
-	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISABLED)) {
+	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED)) {
 		goto fail;
 	}
 
@@ -235,12 +235,12 @@ nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
 	struct nvme_tcp_pdu *pdu;
 
-	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISABLED) {
+	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
 		/* Already disconnecting */
 		return;
 	}
 
-	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	spdk_sock_close(&tqpair->sock);
 
 	/* clear the send_queue */
@@ -305,7 +305,7 @@ nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nv
 
 err:
 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
-	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	return rc;
 }
 
@@ -1481,7 +1481,7 @@ test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
 	CU_ASSERT(rc == 0)
 
 	/* transport qpair is failed. make sure we call down to the transport */
-	qpair.state = NVME_QPAIR_DISABLED;
+	qpair.state = NVME_QPAIR_DISCONNECTED;
 	rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
 	CU_ASSERT(g_connect_qpair_called == true);
 	CU_ASSERT(rc == 0)
@@ -251,7 +251,7 @@ test_spdk_nvme_poll_group_add_remove(void)
 
 	/* Add qpairs to a single transport. */
 	qpair1_1.transport = &t1;
-	qpair1_1.state = NVME_QPAIR_DISABLED;
+	qpair1_1.state = NVME_QPAIR_DISCONNECTED;
 	qpair1_2.transport = &t1;
 	qpair1_2.state = NVME_QPAIR_ENABLED;
 	CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
@@ -388,7 +388,7 @@ test_spdk_nvme_poll_group_process_completions(void)
 	/* try it with three transport poll groups. */
 	group = spdk_nvme_poll_group_create(NULL);
 	SPDK_CU_ASSERT_FATAL(group != NULL);
-	qpair1_1.state = NVME_QPAIR_DISABLED;
+	qpair1_1.state = NVME_QPAIR_DISCONNECTED;
 	qpair1_1.transport = &t1;
 	CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
 	qpair1_1.state = NVME_QPAIR_ENABLED;
@@ -204,7 +204,7 @@ static void test_nvme_qpair_process_completions(void)
 	/* Same if the qpair is failed at the transport layer. */
 	ctrlr.is_failed = false;
 	ctrlr.is_removed = false;
-	qpair.state = NVME_QPAIR_DISABLED;
+	qpair.state = NVME_QPAIR_DISCONNECTED;
 	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
 	CU_ASSERT(rc == -ENXIO);
 	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
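As a usage note, a sketch of how an application observes the renamed state through the public API: the -ENXIO return is the one the unit test above expects once a qpair has dropped to NVME_QPAIR_DISCONNECTED internally; the polling function itself is illustrative and not part of this change.

#include "spdk/nvme.h"

/* Sketch: poll an I/O qpair and, on a transport-level failure (-ENXIO),
 * try to re-establish it with the public reconnect API. */
static int
poll_and_reconnect(struct spdk_nvme_qpair *qpair)
{
	int32_t rc;

	rc = spdk_nvme_qpair_process_completions(qpair, 0);
	if (rc == -ENXIO) {
		return spdk_nvme_ctrlr_reconnect_io_qpair(qpair);
	}

	return rc < 0 ? rc : 0;
}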