nvme: In nvme_tcp_qpair_process_completions, do not call nvme_tcp_read_pdu in a loop

nvme_tcp_read_pdu itself has a loop in it that runs until no more data is
available, so the extra loop does nothing.

Change-Id: I1471018e396c43187d1f06bd18ce8a6846a71c94
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15139
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit 73b02ffdc3
parent 8b29c21118
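For context on why the caller-side loop was redundant: nvme_tcp_read_pdu already drives the PDU receive state machine in its own do/while loop, and with this change it also owns the completion budget. Below is a minimal stand-alone sketch of that control flow using toy types; toy_qpair, toy_advance_state and toy_read_pdu are illustrative stand-ins, not the SPDK structures or functions.

#include <stdbool.h>
#include <stdint.h>

struct toy_qpair {
	uint32_t async_complete; /* completions recorded outside this poll */
	int      recv_state;     /* current position in the receive state machine */
};

/* Stand-in for one pass of the state machine: consume whatever socket data is
 * available; return true if a full response PDU was completed on this pass. */
static bool toy_advance_state(struct toy_qpair *q) { (void)q; return false; }

static int
toy_read_pdu(struct toy_qpair *q, uint32_t *reaped, uint32_t max_completions)
{
	int prev_state;

	*reaped = q->async_complete;
	q->async_complete = 0;

	/* Loop until the state machine stops making progress (no more data) or
	 * the caller's budget is reached -- which is why an outer loop around
	 * this function adds nothing. */
	do {
		if (*reaped >= max_completions) {
			break;
		}
		prev_state = q->recv_state;
		if (toy_advance_state(q)) {
			(*reaped)++;
		}
	} while (prev_state != q->recv_state);

	return 0;
}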
@@ -1600,8 +1600,15 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 	uint32_t data_len;
 	enum nvme_tcp_pdu_recv_state prev_state;
 
+	*reaped = tqpair->async_complete;
+	tqpair->async_complete = 0;
+
 	/* The loop here is to allow for several back-to-back state changes. */
 	do {
+		if (*reaped >= max_completions) {
+			break;
+		}
+
 		prev_state = tqpair->recv_state;
 		pdu = tqpair->recv_pdu;
 		switch (tqpair->recv_state) {
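A small self-contained walk-through of the new accounting added at the top of the function; the values 3 and 2 are made up for illustration. Completions that finished asynchronously are now counted against the caller's budget before any socket reads happen, instead of being folded in at the old out: label.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t async_complete = 3;  /* pretend 3 completions happened asynchronously */
	uint32_t max_completions = 2; /* caller's budget for this poll */
	uint32_t reaped;

	reaped = async_complete;      /* *reaped = tqpair->async_complete; */
	async_complete = 0;           /* tqpair->async_complete = 0;       */

	if (reaped >= max_completions) {
		/* First check inside the do/while: budget already spent, so the
		 * function performs no socket reads on this poll and returns 0. */
	}

	assert(reaped == 3);
	assert(async_complete == 0);
	return 0;
}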
@@ -1622,8 +1629,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 			}
 			pdu->ch_valid_bytes += rc;
 			if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
-				rc = NVME_TCP_PDU_IN_PROGRESS;
-				goto out;
+				return NVME_TCP_PDU_IN_PROGRESS;
 			}
 
 			/* The command header of this PDU has now been read from the socket. */
@@ -1642,8 +1648,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 
 			pdu->psh_valid_bytes += rc;
 			if (pdu->psh_valid_bytes < pdu->psh_len) {
-				rc = NVME_TCP_PDU_IN_PROGRESS;
-				goto out;
+				return NVME_TCP_PDU_IN_PROGRESS;
 			}
 
 			/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
@@ -1671,8 +1676,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 
 			pdu->rw_offset += rc;
 			if (pdu->rw_offset < data_len) {
-				rc = NVME_TCP_PDU_IN_PROGRESS;
-				goto out;
+				return NVME_TCP_PDU_IN_PROGRESS;
 			}
 
 			assert(pdu->rw_offset == data_len);
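The three hunks at old lines 1622, 1642 and 1671 are the same mechanical change: a partially read header or payload used to stash NVME_TCP_PDU_IN_PROGRESS in rc and jump to the shared out: label, and now returns immediately, which is possible because the bookkeeping that used to live at out: has moved to the top of the function. A minimal before/after sketch of that pattern; read_some and TOY_PDU_IN_PROGRESS are hypothetical stand-ins.

#define TOY_PDU_IN_PROGRESS 1 /* hypothetical stand-in for NVME_TCP_PDU_IN_PROGRESS */

/* Hypothetical helper standing in for the per-state socket read. */
static int read_some(int want) { return want - 1; /* pretend the read came up short */ }

/* Old shape: partial reads funnel through a shared label. */
static int old_shape(void)
{
	int rc = 0;

	if (read_some(8) < 8) {
		rc = TOY_PDU_IN_PROGRESS;
		goto out;
	}
out:
	/* shared bookkeeping used to happen here */
	return rc;
}

/* New shape: partial reads return directly; no label is needed because the
 * bookkeeping now happens before the read loop starts. */
static int new_shape(void)
{
	if (read_some(8) < 8) {
		return TOY_PDU_IN_PROGRESS;
	}
	return 0;
}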
@@ -1681,19 +1685,14 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped, uint32_t max_
 			break;
 		case NVME_TCP_PDU_RECV_STATE_ERROR:
 			memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
-			rc = NVME_TCP_PDU_FATAL;
-			break;
+			return NVME_TCP_PDU_FATAL;
 		default:
 			assert(0);
 			break;
 		}
-	} while (prev_state != tqpair->recv_state && *reaped + tqpair->async_complete < max_completions);
+	} while (prev_state != tqpair->recv_state);
 
-out:
-	*reaped += tqpair->async_complete;
-	tqpair->async_complete = 0;
-
-	return rc;
+	return rc > 0 ? 0 : rc;
 }
 
 static void
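The tail of the function also changes: the out: label and the async_complete fold-in are gone, and the final statement becomes return rc > 0 ? 0 : rc. At that point rc holds whatever the last helper call left in it (judging from pdu->ch_valid_bytes += rc in the hunks above, the read helpers return a positive byte count on success), so the expression collapses any positive value to 0 and lets only negative values such as NVME_TCP_PDU_FATAL reach the caller. A trivial check of that mapping, with made-up values:

#include <assert.h>

/* The mapping applied by the new final return statement. */
static int collapse(int rc)
{
	return rc > 0 ? 0 : rc;
}

int main(void)
{
	assert(collapse(16) == 0);  /* e.g. a positive byte count -> success */
	assert(collapse(0)  == 0);  /* nothing left to do -> success */
	assert(collapse(-1) == -1); /* fatal errors still propagate */
	return 0;
}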
@@ -1758,25 +1757,19 @@ nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_c
 	}
 
 	if (max_completions == 0) {
-		max_completions = tqpair->num_entries;
+		max_completions = spdk_max(tqpair->num_entries, 1);
 	} else {
 		max_completions = spdk_min(max_completions, tqpair->num_entries);
 	}
 
 	reaped = 0;
-	do {
-		rc = nvme_tcp_read_pdu(tqpair, &reaped, max_completions);
-		if (rc < 0) {
-			SPDK_DEBUGLOG(nvme, "Error polling CQ! (%d): %s\n",
-				      errno, spdk_strerror(errno));
-			goto fail;
-		} else if (rc == 0) {
-			/* Partial PDU is read */
-			break;
-		}
-
-	} while (reaped < max_completions);
+	rc = nvme_tcp_read_pdu(tqpair, &reaped, max_completions);
+	if (rc < 0) {
+		SPDK_DEBUGLOG(nvme, "Error polling CQ! (%d): %s\n",
+			      errno, spdk_strerror(errno));
+		goto fail;
+	}
 
 	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
 		nvme_tcp_qpair_check_timeout(qpair);
 	}
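On the caller side, nvme_tcp_qpair_process_completions now makes exactly one call into nvme_tcp_read_pdu and relies on its internal loop. The new spdk_max(tqpair->num_entries, 1) keeps the budget from being zero when num_entries is zero; with a zero budget the early-exit check added inside nvme_tcp_read_pdu would break out before doing any work. A condensed sketch of the resulting polling shape; toy_read_pdu, toy_min and toy_max are illustrative stand-ins, not SPDK APIs.

#include <stdint.h>

static uint32_t toy_max(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t toy_min(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Stub standing in for nvme_tcp_read_pdu: loops internally, reports reaped. */
static int toy_read_pdu(uint32_t *reaped, uint32_t max_completions)
{
	*reaped = max_completions; /* pretend the budget was fully used */
	return 0;
}

static int64_t
toy_process_completions(uint32_t max_completions, uint32_t num_entries)
{
	uint32_t reaped = 0;
	int rc;

	if (max_completions == 0) {
		max_completions = toy_max(num_entries, 1);
	} else {
		max_completions = toy_min(max_completions, num_entries);
	}

	/* Single call: no outer do/while around it anymore. */
	rc = toy_read_pdu(&reaped, max_completions);
	if (rc < 0) {
		return -1; /* the real code logs errno and jumps to its fail path */
	}

	return (int64_t)reaped;
}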
@@ -1376,6 +1376,7 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
 	tqpair->send_pdu = &pdu;
 	tqpair->qpair.ctrlr = &ctrlr;
 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
+	tqpair->num_entries = 128;
 	ic_req = &pdu.hdr.ic_req;
 
 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;