nvme/tcp: Calculate requests completed asynchronously
A preparation step for enabling zero copy in the NVMe-oF TCP initiator. With zero copy enabled, some requests may complete outside of the "process_completions" call, and they must be taken into account so that the correct number of completions is returned.

Change-Id: Iba7973f6da815645bbfad0334619d46b66379226
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4209
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit a910bc647d (parent 2ceff364e5)
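In outline: a request that completes while the transport poller is running is counted through the *reaped pointer as before, while a request that completes outside that window (for example, once a zero-copy send has been acknowledged) only bumps the new per-qpair async_complete counter; nvme_tcp_read_pdu() then folds that counter into *reaped on its way out. Below is a minimal standalone sketch of that bookkeeping, using hypothetical example_* names rather than the real SPDK types.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical example_* types -- the real code uses struct nvme_tcp_qpair and
 * struct spdk_nvme_qpair; this only mirrors the counting scheme of the patch.
 */
struct example_qpair {
	bool		in_completion_context;	/* managed by the generic poll path */
	uint16_t	async_complete;		/* completions seen outside the poller */
};

/* Completion path: called whenever a request finishes, from either context. */
static void
example_req_complete(struct example_qpair *qp)
{
	if (!qp->in_completion_context) {
		/* Completed asynchronously (e.g. a zero-copy send was acknowledged):
		 * remember it so the next poll can report it.
		 */
		qp->async_complete++;
	}
	/* When in_completion_context is set, the poller itself counts this
	 * request in its local reaped total, so nothing to do here.
	 */
}

/* Poller epilogue, i.e. what the new "out:" label in nvme_tcp_read_pdu() does:
 * fold the asynchronous completions into the reaped count and reset the counter.
 */
static void
example_drain_async(struct example_qpair *qp, uint32_t *reaped)
{
	*reaped += qp->async_complete;
	qp->async_complete = 0;
}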
@@ -88,6 +88,7 @@ struct nvme_tcp_qpair {
 	struct nvme_tcp_req		*tcp_reqs;

 	uint16_t			num_entries;
+	uint16_t			async_complete;

 	struct {
 		uint16_t host_hdgst_enable: 1;
@@ -550,6 +551,10 @@ nvme_tcp_req_complete_safe(struct nvme_tcp_req *tcp_req)

 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tcp_req->tqpair);

+	if (!tcp_req->tqpair->qpair.in_completion_context) {
+		tcp_req->tqpair->async_complete++;
+	}
+
 	/* Cache arguments to be passed to nvme_complete_request since tcp_req can be zeroed when released */
 	memcpy(&cpl, &tcp_req->rsp, sizeof(cpl));
 	user_cb = tcp_req->req->cb_fn;
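The branch above keys off qpair.in_completion_context on the embedded generic qpair. The assumption here (the generic layer is outside this diff) is that the common polling path sets that flag for the duration of the transport's process_completions call and clears it afterwards, so any completion raised while the flag is clear is, by definition, asynchronous. A toy illustration of that assumed contract, reusing struct example_qpair from the sketch above; toy_poll() and its transport_process_completions callback are made-up names:

/* Toy illustration of the assumed generic-layer contract: completions raised
 * while the flag is set are counted by the transport poller itself; anything
 * raised after the flag is cleared lands in async_complete instead.
 */
static int32_t
toy_poll(struct example_qpair *qp,
	 int32_t (*transport_process_completions)(struct example_qpair *qp))
{
	int32_t reaped;

	qp->in_completion_context = true;
	reaped = transport_process_completions(qp);
	qp->in_completion_context = false;

	return reaped;
}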
@@ -1442,7 +1447,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
 				}
 				pdu->ch_valid_bytes += rc;
 				if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
-					return NVME_TCP_PDU_IN_PROGRESS;
+					rc = NVME_TCP_PDU_IN_PROGRESS;
+					goto out;
 				}
 			}

@@ -1462,7 +1468,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)

 			pdu->psh_valid_bytes += rc;
 			if (pdu->psh_valid_bytes < pdu->psh_len) {
-				return NVME_TCP_PDU_IN_PROGRESS;
+				rc = NVME_TCP_PDU_IN_PROGRESS;
+				goto out;
 			}

 			/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
@@ -1491,7 +1498,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)

 			pdu->readv_offset += rc;
 			if (pdu->readv_offset < data_len) {
-				return NVME_TCP_PDU_IN_PROGRESS;
+				rc = NVME_TCP_PDU_IN_PROGRESS;
+				goto out;
 			}

 			assert(pdu->readv_offset == data_len);
@@ -1507,6 +1515,10 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
 		}
 	} while (prev_state != tqpair->recv_state);

+out:
+	*reaped += tqpair->async_complete;
+	tqpair->async_complete = 0;
+
 	return rc;
 }

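From an application's point of view nothing changes except accuracy: the value returned by spdk_nvme_qpair_process_completions(), the public entry point that eventually reaches this transport code, now also covers requests that finished outside the poll itself. A hedged usage sketch, assuming qpair is an already-connected NVMe/TCP I/O queue pair:

#include <stdio.h>
#include "spdk/nvme.h"

/* Drain one qpair and report how many requests finished. With this patch the
 * count also includes requests completed asynchronously (e.g. after zero-copy
 * sends are acknowledged), because the transport folds async_complete into the
 * reaped total before returning.
 */
static int32_t
drain_qpair(struct spdk_nvme_qpair *qpair)
{
	/* max_completions == 0 means "no limit": process everything available. */
	int32_t completions = spdk_nvme_qpair_process_completions(qpair, 0);

	if (completions < 0) {
		/* A negative return indicates a transport-level failure. */
		fprintf(stderr, "process_completions failed: %d\n", (int)completions);
	}

	return completions;
}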