lib/nvme: disable multi-c2h crc32 offload at host

An example:
There are 3 c2h data PDUs for one read request, with data digest
and the accel_poller both enabled. The first PDU is offloaded to
the accel_poller, and the crc32c of the other PDUs is calculated
on the CPU. If the CPU finishes the last PDU before the
accel_poller finishes the first, SPDK completes the read request
immediately and frees some of its objects. When the accel_poller
calculation later completes, it finds the request already freed
and aborts SPDK.

Disable the async crc32c offload for requests that span multiple
c2h PDUs to prevent this situation.

Signed-off-by: MengjinWu <mengjin.wu@intel.com>
Change-Id: I03c9e5b30622bbe84523c0836aa93cfed672896
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14079
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: GangCao <gang.cao@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: MengjinWu, 2022-09-21 04:05:07 +00:00 (committed by Ben Walker)
parent 1819d49e4f
commit 03843f73cb

@@ -117,7 +117,6 @@ struct nvme_tcp_req {
 	 * waiting for H2C complete */
 	uint16_t ttag_r2t_next;
 	bool in_capsule_data;
-	bool pdu_in_use;
 	/* It is used to track whether the req can be safely freed */
 	union {
 		uint8_t raw;
@@ -193,7 +192,6 @@ nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
 	tcp_req->expected_datao = 0;
 	tcp_req->req = NULL;
 	tcp_req->in_capsule_data = false;
-	tcp_req->pdu_in_use = false;
 	tcp_req->r2tl_remain = 0;
 	tcp_req->r2tl_remain_next = 0;
 	tcp_req->active_r2ts = 0;
@@ -1097,7 +1095,6 @@ tcp_data_recv_crc32_done(void *cb_arg, int status)
 	}
 end:
-	tcp_req->pdu_in_use = false;
 	nvme_tcp_c2h_data_payload_handle(tqpair, tcp_req->pdu, &dummy_reaped);
 }
@@ -1124,13 +1121,11 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
 		/* But if the data digest is enabled, tcp_req cannot be NULL */
 		assert(tcp_req != NULL);
 		tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
-		/* Only suport this limitated case for the first step */
+		/* Only support this limited case where the request has only one c2h pdu */
 		if ((nvme_qpair_get_state(&tqpair->qpair) >= NVME_QPAIR_CONNECTED) &&
 		    (tgroup != NULL && tgroup->group.group->accel_fn_table.submit_accel_crc32c) &&
 		    spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
-				&& !tcp_req->pdu_in_use)) {
-			tcp_req->pdu_in_use = true;
+				&& tcp_req->req->payload_size == pdu->data_len)) {
 			tcp_req->pdu->hdr = pdu->hdr;
 			tcp_req->pdu->req = tcp_req;
 			memcpy(tcp_req->pdu->data_digest, pdu->data_digest, sizeof(pdu->data_digest));