nvme/tcp: Change parameters of nvme_tcp_pdu_set_data_buf to use in target
This patch is the first in a series. The goals of the series are to fix a bug in nvme_tcp_pdu_set_data_buf() when an array of multiple iovecs is passed, to share nvme_tcp_pdu_set_data_buf() between the NVMe/TCP initiator and target, and to use nvme_tcp_pdu_set_data_buf() not only for C2H and H2C data transfers but also for in-capsule data in the NVMe/TCP target.

This patch addresses the second goal, sharing nvme_tcp_pdu_set_data_buf() between the NVMe/TCP initiator and target. Because struct nvme_tcp_req and struct spdk_nvmf_tcp_req are different types, the helper cannot take a request struct directly. However, the four values it actually needs, iov, iovcnt, data_offset, and data_len, are common to both, so this patch changes the parameters of nvme_tcp_pdu_set_data_buf() to accept them.

The bug itself is fixed in the next patch and tested in the patch after that.

Change-Id: Ifabd9a2227b25f4820738656e804d05dc3f874a5
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/455622
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
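To illustrate the reasoning above: once the helper takes only (iov, iovcnt, data_offset, data_len) instead of a struct nvme_tcp_req, any caller that can supply an iovec array can use it, whatever its own request type looks like. The following standalone C sketch is not SPDK code; the helper, struct, and field names are invented for illustration, and it only mimics the decoupling pattern with a toy gather routine.

/*
 * Sketch only: a helper that depends on (iov, iovcnt, data_offset, data_len)
 * can be shared by callers whose request structs are unrelated types.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Gather data_len bytes starting at data_offset from an iovec array. */
static uint32_t
gather_from_iov(const struct iovec *iov, int iovcnt,
		uint32_t data_offset, uint32_t data_len, char *out)
{
	uint32_t copied = 0;

	for (int i = 0; i < iovcnt && copied < data_len; i++) {
		uint32_t len = (uint32_t)iov[i].iov_len;

		if (data_offset >= len) {
			/* This iovec lies entirely before the requested offset. */
			data_offset -= len;
			continue;
		}

		uint32_t n = len - data_offset;
		if (n > data_len - copied) {
			n = data_len - copied;
		}
		memcpy(out + copied, (const char *)iov[i].iov_base + data_offset, n);
		copied += n;
		data_offset = 0;
	}

	return copied;
}

/* Two unrelated request types, standing in for the initiator's
 * struct nvme_tcp_req and the target's struct spdk_nvmf_tcp_req. */
struct initiator_req { struct iovec iov[2]; int iovcnt; };
struct target_req    { struct iovec iov[2]; int iovcnt; };

int
main(void)
{
	char a[] = "hello ", b[] = "world";
	char out[16] = {0};

	struct initiator_req ireq = { .iov = {{a, 6}, {b, 5}}, .iovcnt = 2 };
	struct target_req    treq = { .iov = {{a, 6}, {b, 5}}, .iovcnt = 2 };

	/* Same helper, different request structs: only the four common
	 * values (iov, iovcnt, data_offset, data_len) are passed. */
	gather_from_iov(ireq.iov, ireq.iovcnt, 0, 11, out);
	printf("%s\n", out);	/* hello world */

	memset(out, 0, sizeof(out));
	gather_from_iov(treq.iov, treq.iovcnt, 6, 5, out);
	printf("%s\n", out);	/* world */

	return 0;
}

In this patch, the initiator's call sites pass tcp_req->iov and tcp_req->iovcnt together with an explicit offset (0 for in-capsule data, c2h_data->datao for C2H, h2c_data->datao for H2C), as the diff below shows; the target-side caller is added later in the series.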
parent f341e69a50
commit a7b6d2ef00
@@ -621,33 +621,33 @@ nvme_tcp_qpair_cmd_send_complete(void *cb_arg)
 
 static void
 nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
-			  struct nvme_tcp_req *tcp_req,
-			  uint32_t data_len)
+			  struct iovec *iov, int iovcnt,
+			  uint32_t data_offset, uint32_t data_len)
 {
 	uint32_t i, remain_len, len;
 	struct _nvme_tcp_sgl *pdu_sgl;
 
-	if (tcp_req->iovcnt == 1) {
-		nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)tcp_req->iov[0].iov_base + tcp_req->datao), data_len);
+	if (iovcnt == 1) {
+		nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)iov[0].iov_base + data_offset), data_len);
 	} else {
 		i = 0;
 		pdu_sgl = &pdu->sgl;
-		assert(tcp_req->iovcnt <= NVME_TCP_MAX_SGL_DESCRIPTORS);
-		_nvme_tcp_sgl_init(pdu_sgl, pdu->data_iov, tcp_req->iovcnt, tcp_req->datao);
+		assert(iovcnt <= NVME_TCP_MAX_SGL_DESCRIPTORS);
+		_nvme_tcp_sgl_init(pdu_sgl, pdu->data_iov, iovcnt, data_offset);
 		remain_len = data_len;
 
 		while (remain_len > 0) {
 			assert(i < NVME_TCP_MAX_SGL_DESCRIPTORS);
-			len = spdk_min(remain_len, tcp_req->iov[i].iov_len);
+			len = spdk_min(remain_len, iov[i].iov_len);
 			remain_len -= len;
-			if (!_nvme_tcp_sgl_append(pdu_sgl, tcp_req->iov[i].iov_base, len)) {
+			if (!_nvme_tcp_sgl_append(pdu_sgl, iov[i].iov_base, len)) {
 				break;
 			}
 			i++;
 		}
 
 		assert(remain_len == 0);
-		pdu->data_iovcnt = tcp_req->iovcnt - pdu_sgl->iovcnt;
+		pdu->data_iovcnt = iovcnt - pdu_sgl->iovcnt;
 		pdu->data_len = data_len;
 	}
 }
@@ -702,7 +702,8 @@ nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
 	}
 
 	tcp_req->datao = 0;
-	nvme_tcp_pdu_set_data_buf(pdu, tcp_req, tcp_req->req->payload_size);
+	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
+				  0, tcp_req->req->payload_size);
 end:
 	capsule_cmd->common.plen = plen;
 	return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, NULL);
@@ -1235,7 +1236,8 @@ nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu
 
 	}
 
-	nvme_tcp_pdu_set_data_buf(pdu, tcp_req, c2h_data->datal);
+	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
+				  c2h_data->datao, c2h_data->datal);
 	pdu->ctx = tcp_req;
 
 	nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
@@ -1281,7 +1283,8 @@ spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
 	h2c_data->datao = tcp_req->datao;
 
 	h2c_data->datal = spdk_min(tcp_req->r2tl_remain, tqpair->maxh2cdata);
-	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req, h2c_data->datal);
+	nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->iov, tcp_req->iovcnt,
+				  h2c_data->datao, h2c_data->datal);
 	tcp_req->r2tl_remain -= h2c_data->datal;
 
 	if (tqpair->host_hdgst_enable) {