lib/nvmf: Add a new state to wait for the req slot

We also need to update spdk_nvmf_tcp_poll_group_poll,
because if the tqpair's recv state is wait_for_req,
we may have already received the data, and there may be
no epoll event to trigger further processing.

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: I9c5a202e47e57aaba63da143f954a20c135a98ae
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/473626
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Ziye Yang 2019-11-09 01:26:13 +08:00 committed by Tomasz Zawadzki
parent bdeb41a3cd
commit 4579a16f30
2 changed files with 12 additions and 8 deletions

View File

@ -138,6 +138,9 @@ enum nvme_tcp_pdu_recv_state {
/* Active tqpair waiting for any PDU specific header */
NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH,
/* Active tqpair waiting for a tcp request, only use in target side */
NVME_TCP_PDU_RECV_STATE_AWAIT_REQ,
/* Active tqpair waiting for payload */
NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD,

View File

@ -1277,6 +1277,7 @@ spdk_nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
switch (state) {
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
break;
case NVME_TCP_PDU_RECV_STATE_ERROR:
@ -1363,6 +1364,9 @@ spdk_nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
{
struct spdk_nvmf_tcp_req *tcp_req;
assert(pdu->psh_valid_bytes == pdu->psh_len);
assert(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
tcp_req = spdk_nvmf_tcp_req_get(tqpair);
if (!tcp_req) {
/* Directly return and make the allocation retry again */
@ -1807,7 +1811,7 @@ spdk_nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair,
spdk_nvmf_tcp_icreq_handle(ttransport, tqpair, pdu);
break;
case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
spdk_nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu);
spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ);
break;
case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
spdk_nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu);
@ -2045,13 +2049,6 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
break;
/* Wait for the pdu specific header */
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
/* Handle the case if psh is already read but the nvmf tcp is not tied */
if (spdk_unlikely((pdu->psh_valid_bytes == pdu->psh_len) &&
(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD))) {
spdk_nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu);
break;
}
if (!tqpair->pdu_recv_buf.remain_size) {
rc = nvme_tcp_recv_buf_read(tqpair->sock, &tqpair->pdu_recv_buf);
if (rc <= 0) {
@ -2070,6 +2067,10 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
spdk_nvmf_tcp_pdu_psh_handle(tqpair, ttransport);
break;
/* Wait for the req slot */
case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
spdk_nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu);
break;
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
/* check whether the data is valid, if not we just return */
if (!pdu->data_len) {