From e9be9df45f0c5790e0dcaa9f837e69dbbddb20e5 Mon Sep 17 00:00:00 2001
From: Ziye Yang
Date: Wed, 30 Oct 2019 23:37:18 +0800
Subject: [PATCH] nvmf/tcp: Fix the potential issue of connection construction.

When async writev is used (e.g., via io_uring), the writev completion
callback may only run after new data has already been received from the
initiator. For example, after the NVMe-oF TCP target receives the
ic_req from the initiator and sends out the ic_resp, the tqpair state
does not change from invalid to running until the completion callback
(spdk_nvmf_tcp_send_icresp_complete) is executed. So the ic_resp may
already have reached the initiator, and the next command may already
have arrived, while the callback has still not been executed. This was
observed when using io_uring, and this patch fixes the issue by adding
an INITIALIZING qpair state and deferring PDU processing until the
qpair has left that state.

Signed-off-by: Ziye Yang
Change-Id: I7f4332522866d475e106ac6d36a8ec715133f0dc
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/472770
Tested-by: SPDK CI Jenkins
Reviewed-by: Jim Harris
Reviewed-by: Ben Walker
Reviewed-by: Shuhei Matsumoto
Community-CI: Broadcom SPDK FC-NVMe CI
---
 include/spdk_internal/nvme_tcp.h | 7 ++++---
 lib/nvmf/tcp.c                   | 5 +++++
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/include/spdk_internal/nvme_tcp.h b/include/spdk_internal/nvme_tcp.h
index 99f12b371..b9b765d33 100644
--- a/include/spdk_internal/nvme_tcp.h
+++ b/include/spdk_internal/nvme_tcp.h
@@ -153,9 +153,10 @@ enum nvme_tcp_error_codes {
 
 enum nvme_tcp_qpair_state {
 	NVME_TCP_QPAIR_STATE_INVALID = 0,
-	NVME_TCP_QPAIR_STATE_RUNNING = 1,
-	NVME_TCP_QPAIR_STATE_EXITING = 2,
-	NVME_TCP_QPAIR_STATE_EXITED = 3,
+	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
+	NVME_TCP_QPAIR_STATE_RUNNING = 2,
+	NVME_TCP_QPAIR_STATE_EXITING = 3,
+	NVME_TCP_QPAIR_STATE_EXITED = 4,
 };
 
 static uint32_t
diff --git a/lib/nvmf/tcp.c b/lib/nvmf/tcp.c
index 7138f149a..63953d28d 100644
--- a/lib/nvmf/tcp.c
+++ b/lib/nvmf/tcp.c
@@ -1760,6 +1760,7 @@ spdk_nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);
 
+	tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING;
 	spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_send_icresp_complete, tqpair);
 	spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
 	return;
@@ -2011,6 +2012,10 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 		/* Wait for the common header */
 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
+			if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) {
+				return rc;
+			}
+
 			if (!tqpair->pdu_recv_buf.remain_size) {
 				rc = nvme_tcp_recv_buf_read(tqpair->sock, &tqpair->pdu_recv_buf);
 				if (rc <= 0) {
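
Note for context: the early return added in spdk_nvmf_tcp_sock_process() above relies on the ic_resp write-completion callback moving the qpair out of the new INITIALIZING state, which is what lets PDU processing resume once the ic_resp has actually been written. A minimal sketch of that callback is shown below for reference only; it is not part of this diff, and the exact body in lib/nvmf/tcp.c may differ.

/*
 * Sketch only (not part of this patch): the write-completion callback
 * passed to spdk_nvmf_tcp_qpair_write_pdu() for the ic_resp. Once it
 * runs, the qpair leaves INITIALIZING and sock_process() will start
 * parsing any PDUs already sitting in the receive buffer.
 */
static void
spdk_nvmf_tcp_send_icresp_complete(void *cb_arg)
{
	struct spdk_nvmf_tcp_qpair *tqpair = cb_arg;

	/* The ic_resp has been handed off to the socket layer; it is now
	 * safe to process commands from the initiator. */
	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
}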