nvme/tcp: Report correct max sge

The NVMe TCP driver supports up to 16 SGE elements,
but only 1 SGE is reported to the upper layer. That
leads to unnecessary request splitting, which
degrades performance.
Also pass the correct iovcnt to nvme_tcp_build_iovs -
it should be 32 (the size of pdu->iov). Otherwise the
PDU header consumes 1 iov and the data is written
only partially.
Add a check that at least data_len bytes were
appended to the socket iovs and fail the request
otherwise.
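
To illustrate the splitting cost, here is an illustration only (not SPDK's
actual splitting code): with max_sges = 1 every buffer of a multi-segment
payload becomes its own child request, while max_sges = 16 lets a whole
16-segment payload travel in a single request.

/* Illustration only: child requests needed for an I/O described by
 * n_segments buffers when the transport advertises max_sges per request. */
static unsigned int
requests_needed(unsigned int n_segments, unsigned int max_sges)
{
	return (n_segments + max_sges - 1) / max_sges;	/* ceiling division */
}

For a 16-segment payload, requests_needed(16, 1) == 16 child requests versus
requests_needed(16, 16) == 1, which is the performance gap this commit closes.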

Signed-off-by: Alexey Marchuk <alexeymar@nvidia.com>
Change-Id: Ie83c807dd3fec2c7e7cbcda1e493d6fd74ebe599
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17006
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Authored by Alexey Marchuk on 2023-03-01 20:55:04 +01:00; committed by Tomasz Zawadzki
parent a8f7d7cf0a
commit ada9333423

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2018 Intel Corporation. All rights reserved.
  * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
- * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  */

 /*
@@ -411,12 +411,18 @@ _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
 	uint32_t mapped_length = 0;
 	struct nvme_tcp_qpair *tqpair = pdu->qpair;

-	pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, NVME_TCP_MAX_SGL_DESCRIPTORS, pdu,
+	pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
 						   (bool)tqpair->flags.host_hdgst_enable, (bool)tqpair->flags.host_ddgst_enable,
 						   &mapped_length);
+	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
+	if (spdk_unlikely(mapped_length < pdu->data_len)) {
+		SPDK_ERRLOG("could not map the whole %u bytes (mapped only %u bytes)\n", pdu->data_len,
+			    mapped_length);
+		_pdu_write_done(pdu, -EINVAL);
+		return;
+	}
 	pdu->sock_req.cb_fn = _pdu_write_done;
 	pdu->sock_req.cb_arg = pdu;
-	TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
 	tqpair->stats->submitted_requests++;
 	spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
 }
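
nvme_tcp_build_iovs stops filling once it runs out of iov entries, so an
under-sized iovcnt shows up as mapped_length < pdu->data_len; the new guard
turns that into a failed request instead of a silent partial write (the PDU
is queued first, since the write-done path presumably dequeues it). A toy
model of that truncation, not SPDK's actual nvme_tcp_build_iovs:

/* Toy model, not SPDK's nvme_tcp_build_iovs: filling stops when the iov
 * array is full, so a too-small iovcnt leaves *mapped_length short of the
 * payload length even though the call appears to succeed. */
#include <stddef.h>
#include <sys/uio.h>

static int
toy_build_iovs(struct iovec *iov, int iovcnt, void **bufs, const size_t *lens,
	       int nbufs, size_t *mapped_length)
{
	int i;

	*mapped_length = 0;
	for (i = 0; i < nbufs && i < iovcnt; i++) {
		iov[i].iov_base = bufs[i];
		iov[i].iov_len = lens[i];
		*mapped_length += lens[i];
	}

	return i;	/* iov entries used; may cover only part of the data */
}

For example, with one header buffer plus 16 data segments but iovcnt capped
at 16, the last segment never makes it into the iov array and mapped_length
falls short of data_len - exactly the case the new SPDK_ERRLOG path rejects.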
@@ -2166,13 +2172,7 @@ nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 static uint16_t
 nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
 {
-	/*
-	 * We do not support >1 SGE in the initiator currently,
-	 * so we can only return 1 here. Once that support is
-	 * added, this should return ctrlr->cdata.nvmf_specific.msdbd
-	 * instead.
-	 */
-	return 1;
+	return NVME_TCP_MAX_SGL_DESCRIPTORS;
 }

 static int
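
The removed comment suggested eventually honoring ctrlr->cdata.nvmf_specific.msdbd;
the merged change simply reports the driver's own limit. A hypothetical
follow-up that also clamps to the controller-advertised MSDBD could look like
the sketch below (it assumes MSDBD == 0 means "no limit", relies on the
driver's internal headers, and is not part of this commit):

/* Hypothetical variant, not part of this commit: clamp the driver limit to
 * the controller's advertised MSDBD, as the removed comment suggested.
 * Assumes MSDBD == 0 means "no limit". */
static uint16_t
nvme_tcp_ctrlr_get_max_sges_clamped(struct spdk_nvme_ctrlr *ctrlr)
{
	uint16_t msdbd = ctrlr->cdata.nvmf_specific.msdbd;

	if (msdbd != 0 && msdbd < NVME_TCP_MAX_SGL_DESCRIPTORS) {
		return msdbd;
	}

	return NVME_TCP_MAX_SGL_DESCRIPTORS;
}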