nvmf/tcp: Don't break out of poll loop based on number of PDUs

It's actually faster to keep processing PDUs until the socket runs out of data.
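For context, the shape of the change is roughly the following minimal, self-contained sketch. It is not the real spdk_nvmf_tcp_sock_process(); read_one_step(), bytes_left, and the three-state enum are hypothetical stand-ins for the switch over tqpair->recv_state and the socket's buffered data:

#include <stdio.h>

/* Hypothetical stand-ins for tqpair->recv_state and the PDU state machine;
 * this sketches the loop shape only, not SPDK's actual code. */
enum recv_state { AWAIT_PDU_READY, AWAIT_PSH, AWAIT_PAYLOAD };

/* Pretend state-machine step: consumes one unit of buffered socket data
 * and advances the state; once the data runs out, the state stops changing. */
static enum recv_state
read_one_step(enum recv_state state, int *bytes_left)
{
	if (*bytes_left == 0) {
		return state;
	}
	(*bytes_left)--;
	return (state == AWAIT_PAYLOAD) ? AWAIT_PDU_READY
					: (enum recv_state)(state + 1);
}

int
main(void)
{
	enum recv_state state = AWAIT_PDU_READY, prev_state;
	int bytes_left = 10;	/* pretend the socket has 10 units buffered */
	int iterations = 0;

	/* After the patch: iterate until a pass makes no progress, meaning
	 * the socket ran dry. Before the patch, the condition also broke
	 * out after MAX_NVME_TCP_PDU_LOOP_COUNT (32) PDUs, forcing the
	 * poller to come back around before draining the remaining data. */
	do {
		prev_state = state;
		state = read_one_step(state, &bytes_left);
		iterations++;
	} while (state != prev_state);

	printf("drained in %d iterations\n", iterations);
	return 0;
}

The point is the exit condition: a pass that makes no state progress means the socket has no more data, which is the natural place to stop, rather than an arbitrary per-poll PDU cap.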

Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Change-Id: I9e81babdb9bdc405a8dbf03b2f701fe50bcc70f6
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1559
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Author:    Ben Walker
Date:      2020-03-27 14:16:59 -07:00
Committer: Tomasz Zawadzki
Parent:    24d61956ab
Commit:    ae6519e488


@@ -1680,15 +1680,13 @@ nvmf_tcp_pdu_payload_insert_dif(struct nvme_tcp_pdu *pdu, uint32_t read_offset,
 	return rc;
 }
 
-#define MAX_NVME_TCP_PDU_LOOP_COUNT 32
-
 static int
 spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 {
 	int rc = 0;
 	struct nvme_tcp_pdu *pdu;
 	enum nvme_tcp_pdu_recv_state prev_state;
-	uint32_t data_len, current_pdu_num = 0;
+	uint32_t data_len;
 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport,
 			struct spdk_nvmf_tcp_transport, transport);
 
@@ -1739,15 +1737,13 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 						  0, rc, 0, 0);
 				pdu->psh_valid_bytes += rc;
 			}
 
 			if (pdu->psh_valid_bytes < pdu->psh_len) {
 				return NVME_TCP_PDU_IN_PROGRESS;
 			}
 
 			/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
 			spdk_nvmf_tcp_pdu_psh_handle(tqpair, ttransport);
-			if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY) {
-				current_pdu_num++;
-			}
 			break;
 		/* Wait for the req slot */
 		case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
@@ -1786,7 +1782,6 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 
 			/* All of this PDU has now been read from the socket. */
 			spdk_nvmf_tcp_pdu_payload_handle(tqpair, ttransport);
-			current_pdu_num++;
 			break;
 		case NVME_TCP_PDU_RECV_STATE_ERROR:
 			if (!spdk_sock_is_connected(tqpair->sock)) {
@@ -1798,7 +1793,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 			SPDK_ERRLOG("code should not come to here");
 			break;
 		}
-	} while ((tqpair->recv_state != prev_state) && (current_pdu_num < MAX_NVME_TCP_PDU_LOOP_COUNT));
+	} while (tqpair->recv_state != prev_state);
 
 	return rc;
 }