From 94cd652b1884130d509d8bcf6fc6a6617fe29f4c Mon Sep 17 00:00:00 2001 From: Ziye Yang Date: Wed, 28 Nov 2018 23:22:37 +0800 Subject: [PATCH] nvmf/tcp: Add a poller to check the timeout of each qpair This makes the timeout check for each qpair in the group efficient. If there are many qpairs in the group, we can scale. Change-Id: I75c29a92107dc32377a2ef7edb5ac92868f1c5df Signed-off-by: Ziye Yang Reviewed-on: https://review.gerrithub.io/435277 Chandler-Test-Pool: SPDK Automated Test System Tested-by: SPDK CI Jenkins Reviewed-by: Shuhei Matsumoto Reviewed-by: Ben Walker Reviewed-by: Changpeng Liu --- lib/nvmf/tcp.c | 81 ++++++++++++++++++------------- test/unit/lib/nvmf/tcp.c/tcp_ut.c | 7 +++ 2 files changed, 53 insertions(+), 35 deletions(-) diff --git a/lib/nvmf/tcp.c b/lib/nvmf/tcp.c index 0ed92405b..86b4e9568 100644 --- a/lib/nvmf/tcp.c +++ b/lib/nvmf/tcp.c @@ -282,6 +282,7 @@ struct nvme_tcp_qpair { struct spdk_nvmf_tcp_poll_group { struct spdk_nvmf_transport_poll_group group; struct spdk_sock_group *sock_group; + struct spdk_poller *timeout_poller; TAILQ_HEAD(, nvme_tcp_qpair) qpairs; }; @@ -1216,6 +1217,47 @@ spdk_nvmf_tcp_discover(struct spdk_nvmf_transport *transport, entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE; } +static void +spdk_nvmf_tcp_qpair_handle_timeout(struct nvme_tcp_qpair *tqpair, uint64_t tsc) +{ + if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) || + (tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) { + return; + } + + /* Currently we do not have keep-alive support; replace this check with generic keep-alive handling once it exists. */ + if ((tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR) || + (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) { + return; + } + + /* Check for interval expiration */ + if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout * spdk_get_ticks_hz())) { + SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout); + tqpair->state = NVME_TCP_QPAIR_STATE_EXITING; + } +} 
+ +static int +spdk_nvmf_tcp_poll_group_handle_timeout(void *ctx) +{ + struct spdk_nvmf_tcp_poll_group *tgroup = ctx; + struct nvme_tcp_qpair *tqpair, *tmp; + uint64_t tsc = spdk_get_ticks(); + + TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) { + spdk_nvmf_tcp_qpair_handle_timeout(tqpair, tsc); + if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) { + /* Move to EXITED so the state cannot be set to EXITING again on a later poll. */ + tqpair->state = NVME_TCP_QPAIR_STATE_EXITED; + SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconect the tqpair=%p\n", tqpair); + spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL); + } + } + + return -1; +} + static struct spdk_nvmf_transport_poll_group * spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport) { @@ -1232,6 +1274,9 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport) } TAILQ_INIT(&tgroup->qpairs); + + tgroup->timeout_poller = spdk_poller_register(spdk_nvmf_tcp_poll_group_handle_timeout, tgroup, + 1000000); return &tgroup->group; cleanup: @@ -1246,6 +1291,7 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); spdk_sock_group_close(&tgroup->sock_group); + spdk_poller_unregister(&tgroup->timeout_poller); free(tgroup); } @@ -2693,34 +2739,10 @@ spdk_nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair) spdk_nvmf_tcp_qpair_destroy(SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair)); } -static void -spdk_nvmf_tcp_qpair_handle_timout(struct nvme_tcp_qpair *tqpair) -{ - uint64_t tsc; - - if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) || - (tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) { - return; - } - - /* Currently, we did not have keep alive support, so make sure that we should have the generic support later */ - if (tqpair->recv_state != NVME_TCP_PDU_RECV_STATE_ERROR) { - return; - } - - /* Check for interval expiration */ - tsc = spdk_get_ticks(); - if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout * 
spdk_get_ticks_hz())) { - SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout); - tqpair->state = NVME_TCP_QPAIR_STATE_EXITING; - } -} - static int spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) { struct spdk_nvmf_tcp_poll_group *tgroup; - struct nvme_tcp_qpair *tqpair, *tmp; int rc; tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); @@ -2735,17 +2757,6 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) return rc; } - - TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) { - spdk_nvmf_tcp_qpair_handle_timout(tqpair); - if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) { - /* to prevent the state is set again */ - tqpair->state = NVME_TCP_QPAIR_STATE_EXITED; - SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconect the tqpair=%p\n", tqpair); - spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL); - } - } - return 0; } diff --git a/test/unit/lib/nvmf/tcp.c/tcp_ut.c b/test/unit/lib/nvmf/tcp.c/tcp_ut.c index 9c6690270..18037dc23 100644 --- a/test/unit/lib/nvmf/tcp.c/tcp_ut.c +++ b/test/unit/lib/nvmf/tcp.c/tcp_ut.c @@ -302,11 +302,18 @@ test_nvmf_tcp_poll_group_create(void) { struct spdk_nvmf_tcp_transport ttransport; struct spdk_nvmf_transport_poll_group *group; + struct spdk_thread *thread; + + thread = spdk_allocate_thread(NULL, NULL, NULL, NULL, NULL); + SPDK_CU_ASSERT_FATAL(thread != NULL); + spdk_set_thread(thread); memset(&ttransport, 0, sizeof(ttransport)); group = spdk_nvmf_tcp_poll_group_create(&ttransport.transport); CU_ASSERT_PTR_NOT_NULL(group); spdk_nvmf_tcp_poll_group_destroy(group); + + spdk_free_thread(); } static void