lib/nvme: handle qpair state in transport layer.

The state should be changed and checked by the transport
layer. All transports should follow the same list of steps
when disconnecting/reconnecting.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: If2647624345f2c70f78a20bba4e2206d2762f120
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1853
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Seth Howell 2020-04-14 15:02:43 -07:00 committed by Jim Harris
parent e1c9185005
commit 6338af34fc
5 changed files with 16 additions and 17 deletions

View File

@@ -472,9 +472,6 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
 		goto out;
 	}
-	/* We have to confirm that any old memory is cleaned up. */
-	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
 	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
 	if (rc) {
 		rc = -EAGAIN;
@@ -1165,9 +1162,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 	/* Disable all queues before disabling the controller hardware. */
 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	}
-	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISCONNECTED);
 	nvme_qpair_complete_error_reqs(ctrlr->adminq);
 	nvme_transport_qpair_abort_reqs(ctrlr->adminq, 0 /* retry */);
 	ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;

View File

@@ -357,6 +357,7 @@ struct nvme_async_event_request {
 enum nvme_qpair_state {
 	NVME_QPAIR_DISCONNECTED,
+	NVME_QPAIR_DISCONNECTING,
 	NVME_QPAIR_CONNECTING,
 	NVME_QPAIR_CONNECTED,
 	NVME_QPAIR_ENABLING,

View File

@@ -330,18 +330,15 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_REMOTE;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_MULTICAST_JOIN:
 	case RDMA_CM_EVENT_MULTICAST_ERROR:
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
-		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISCONNECTED);
 		break;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		break;
@@ -1631,7 +1628,6 @@ nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 	struct nvme_rdma_ctrlr *rctrlr;
 	struct nvme_rdma_cm_event_entry *entry, *tmp;
-	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	nvme_rdma_unregister_mem(rqpair);
 	nvme_rdma_unregister_reqs(rqpair);
 	nvme_rdma_unregister_rsps(rqpair);
@@ -2007,7 +2003,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 	}
 	nvme_rdma_qpair_process_cm_event(rqpair);
-	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED)) {
+	if (spdk_unlikely(qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE)) {
 		goto fail;
 	}

View File

@@ -235,12 +235,6 @@ nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
 	struct nvme_tcp_pdu *pdu;
-	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
-		/* Already disconnecting */
-		return;
-	}
-	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	spdk_sock_close(&tqpair->sock);
 	/* clear the send_queue */

View File

@@ -280,12 +280,17 @@ int
 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
 	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	uint8_t transport_failure_reason;
 	int rc;
 	assert(transport != NULL);
 	if (!nvme_qpair_is_admin_queue(qpair)) {
 		qpair->transport = transport;
 	}
+	transport_failure_reason = qpair->transport_failure_reason;
+	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
 	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
 	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
 	if (rc != 0) {
@@ -300,10 +305,11 @@ nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nv
 		}
 	}
-	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
 	return rc;
 err:
+	/* If the qpair was unable to reconnect, restore the original failure reason. */
+	qpair->transport_failure_reason = transport_failure_reason;
 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
 	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 	return rc;
@@ -314,11 +320,18 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
 {
 	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
+	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
+		return;
+	}
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
 	assert(transport != NULL);
 	if (qpair->poll_group) {
 		nvme_poll_group_deactivate_qpair(qpair);
 	}
 	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
+	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 }
 void