nvme/tcp: Change hdr in nvme_tcp_pdu to pointer

Purpose: prepare for a further optimization on the target side when
receiving PDU headers, where we expect to use zero copy.

Change-Id: Iae7f9106844736d7160d39d0af1f5941084422ec
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465380
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Ziye Yang 2019-08-20 15:38:24 +08:00 committed by Jim Harris
parent 02aa7d925b
commit ea5ad0b286
5 changed files with 152 additions and 135 deletions
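In short, this change hoists the previously anonymous header union out of struct nvme_tcp_pdu into a named union nvme_tcp_pdu_hdr, and the PDU now carries both the backing storage (hdr_mem) and a pointer (hdr) to it, so every access goes from pdu->hdr.x to pdu->hdr->x. The following is an abbreviated sketch distilled from the first hunk below; the field list is trimmed and it assumes the spdk/nvme_tcp.h type definitions:

/* Named union replacing the previously anonymous one inside the PDU. */
union nvme_tcp_pdu_hdr {
	uint8_t raw[SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE]; /* holds term-req error data */
	struct spdk_nvme_tcp_common_pdu_hdr common;
	struct spdk_nvme_tcp_c2h_data_hdr c2h_data;
	struct spdk_nvme_tcp_r2t_hdr r2t;
	/* ... remaining per-type headers unchanged ... */
};

struct nvme_tcp_pdu {
	union nvme_tcp_pdu_hdr hdr_mem;	/* embedded storage, as before */
	union nvme_tcp_pdu_hdr *hdr;	/* all code now dereferences this; today it points at hdr_mem */
	bool has_hdgst;
	/* ... rest of the struct unchanged ... */
};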


@ -81,8 +81,7 @@ struct _nvme_tcp_sgl {
uint32_t total_size;
};
struct nvme_tcp_pdu {
union {
union nvme_tcp_pdu_hdr {
/* to hold error pdu data */
uint8_t raw[SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE];
struct spdk_nvme_tcp_common_pdu_hdr common;
@ -95,8 +94,11 @@ struct nvme_tcp_pdu {
struct spdk_nvme_tcp_c2h_data_hdr c2h_data;
struct spdk_nvme_tcp_r2t_hdr r2t;
} hdr;
};
struct nvme_tcp_pdu {
union nvme_tcp_pdu_hdr hdr_mem;
union nvme_tcp_pdu_hdr *hdr;
bool has_hdgst;
bool ddgst_enable;
uint8_t data_digest[SPDK_NVME_TCP_DIGEST_LEN];
@ -159,9 +161,9 @@ static uint32_t
nvme_tcp_pdu_calc_header_digest(struct nvme_tcp_pdu *pdu)
{
uint32_t crc32c;
uint32_t hlen = pdu->hdr.common.hlen;
uint32_t hlen = pdu->hdr->common.hlen;
crc32c = spdk_crc32c_update(&pdu->hdr.raw, hlen, ~0);
crc32c = spdk_crc32c_update(&pdu->hdr->raw, hlen, ~0);
crc32c = crc32c ^ SPDK_CRC32C_XOR;
return crc32c;
}
@ -338,12 +340,12 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
sgl = &pdu->sgl;
_nvme_tcp_sgl_init(sgl, iov, iovcnt, pdu->writev_offset);
hlen = pdu->hdr.common.hlen;
hlen = pdu->hdr->common.hlen;
enable_digest = 1;
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ ||
pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ ||
pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
/* this PDU should be sent without digest */
enable_digest = 0;
}
@ -356,7 +358,7 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
plen = hlen;
if (!pdu->data_len) {
/* PDU header + possible header digest */
_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr.raw, hlen);
_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr->raw, hlen);
goto end;
}
@ -366,7 +368,7 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
plen = hlen;
}
if (!_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr.raw, hlen)) {
if (!_nvme_tcp_sgl_append(sgl, (uint8_t *)&pdu->hdr->raw, hlen)) {
goto end;
}
@ -391,7 +393,7 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
/* check the plen for the first time constructing iov */
if (!pdu->writev_offset) {
assert(plen == pdu->hdr.common.plen);
assert(plen == pdu->hdr->common.plen);
}
end:
@ -592,17 +594,17 @@ nvme_tcp_pdu_calc_psh_len(struct nvme_tcp_pdu *pdu, bool hdgst_enable)
{
uint8_t psh_len, pdo, padding_len;
psh_len = pdu->hdr.common.hlen;
psh_len = pdu->hdr->common.hlen;
/* Only the following five type has header digest */
if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_DATA) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) && hdgst_enable) {
if (((pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD) ||
(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_DATA) ||
(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
(pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) && hdgst_enable) {
pdu->has_hdgst = true;
psh_len += SPDK_NVME_TCP_DIGEST_LEN;
if (pdu->hdr.common.plen > psh_len) {
pdo = pdu->hdr.common.pdo;
if (pdu->hdr->common.plen > psh_len) {
pdo = pdu->hdr->common.pdo;
padding_len = pdo - psh_len;
if (padding_len > 0) {
psh_len = pdo;

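The zero-copy intent stated in the commit message follows directly from this indirection: with hdr as a pointer, a receive path no longer has to copy header bytes into the PDU's own hdr_mem. A hypothetical sketch of that direction is below; attach_hdr and recv_buf are illustrative and not part of this change, and the copying behavior shown in the trailing comment is what the code still does after this commit:

/* Hypothetical zero-copy attach: point the PDU header at bytes that
 * already sit in a receive buffer, assuming the buffer stays valid for
 * the lifetime of the PDU processing. No memcpy of the header occurs. */
static void
nvme_tcp_pdu_attach_hdr(struct nvme_tcp_pdu *pdu, uint8_t *recv_buf)
{
	pdu->hdr = (union nvme_tcp_pdu_hdr *)recv_buf;
}

/* The copying path retained by this commit, by contrast, keeps reading
 * into the embedded storage:
 *
 *     pdu->hdr = &pdu->hdr_mem;
 *     nvme_tcp_read_data(sock, hlen, (uint8_t *)pdu->hdr->raw);
 */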

@ -151,6 +151,7 @@ nvme_tcp_req_get(struct nvme_tcp_qpair *tqpair)
tcp_req->active_r2ts = 0;
tcp_req->iovcnt = 0;
memset(&tcp_req->send_pdu, 0, sizeof(tcp_req->send_pdu));
tcp_req->send_pdu.hdr = &tcp_req->send_pdu.hdr_mem;
TAILQ_INSERT_TAIL(&tqpair->outstanding_reqs, tcp_req, link);
return tcp_req;
@ -429,7 +430,7 @@ nvme_tcp_qpair_process_send_queue(struct nvme_tcp_qpair *tqpair)
*/
TAILQ_INIT(&completed_pdus_list);
while (bytes > 0) {
pdu_length = pdu->hdr.common.plen - pdu->writev_offset;
pdu_length = pdu->hdr->common.plen - pdu->writev_offset;
assert(pdu_length > 0);
if (bytes >= pdu_length) {
bytes -= pdu_length;
@ -464,10 +465,10 @@ nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
int hlen;
uint32_t crc32c;
hlen = pdu->hdr.common.hlen;
hlen = pdu->hdr->common.hlen;
enable_digest = 1;
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) {
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) {
/* this PDU should be sent without digest */
enable_digest = 0;
}
@ -475,7 +476,7 @@ nvme_tcp_qpair_write_pdu(struct nvme_tcp_qpair *tqpair,
/* Header Digest */
if (enable_digest && tqpair->host_hdgst_enable) {
crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
MAKE_DIGEST_WORD((uint8_t *)pdu->hdr->raw + hlen, crc32c);
}
/* Data Digest */
@ -630,7 +631,7 @@ nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
pdu = &tcp_req->send_pdu;
capsule_cmd = &pdu->hdr.capsule_cmd;
capsule_cmd = &pdu->hdr->capsule_cmd;
capsule_cmd->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
capsule_cmd->ccsqe = tcp_req->req->cmd;
@ -762,6 +763,7 @@ nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
case NVME_TCP_PDU_RECV_STATE_ERROR:
memset(&tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
tqpair->recv_pdu.hdr = &tqpair->recv_pdu.hdr_mem;
break;
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
@ -790,7 +792,8 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
rsp_pdu = &tqpair->send_pdu;
memset(rsp_pdu, 0, sizeof(*rsp_pdu));
h2c_term_req = &rsp_pdu->hdr.term_req;
rsp_pdu->hdr = &rsp_pdu->hdr_mem;
h2c_term_req = &rsp_pdu->hdr->term_req;
h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
h2c_term_req->common.hlen = h2c_term_req_hdr_len;
@ -799,14 +802,14 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
DSET32(&h2c_term_req->fei, error_offset);
}
copy_len = pdu->hdr.common.hlen;
copy_len = pdu->hdr->common.hlen;
if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
}
/* Copy the error info into the buffer */
memcpy((uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, pdu->hdr.raw, copy_len);
nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, copy_len);
memcpy((uint8_t *)rsp_pdu->hdr->raw + h2c_term_req_hdr_len, pdu->hdr->raw, copy_len);
nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr->raw + h2c_term_req_hdr_len, copy_len);
/* Contain the header len of the wrong received pdu */
h2c_term_req->common.plen = h2c_term_req->common.hlen + copy_len;
@ -826,15 +829,15 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
pdu = &tqpair->recv_pdu;
SPDK_DEBUGLOG(SPDK_LOG_NVME, "pdu type = %d\n", pdu->hdr.common.pdu_type);
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
SPDK_DEBUGLOG(SPDK_LOG_NVME, "pdu type = %d\n", pdu->hdr->common.pdu_type);
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
goto err;
}
expected_hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
if (pdu->hdr.common.plen != expected_hlen) {
if (pdu->hdr->common.plen != expected_hlen) {
plen_error = true;
}
} else {
@ -844,52 +847,52 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
goto err;
}
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP:
expected_hlen = sizeof(struct spdk_nvme_tcp_rsp);
if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
if (pdu->hdr->common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
hd_len = SPDK_NVME_TCP_DIGEST_LEN;
}
if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
if (pdu->hdr->common.plen != (expected_hlen + hd_len)) {
plen_error = true;
}
break;
case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
expected_hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
if (pdu->hdr->common.plen < pdu->hdr->common.pdo) {
plen_error = true;
}
break;
case SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ:
expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
if ((pdu->hdr.common.plen <= expected_hlen) ||
(pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
if ((pdu->hdr->common.plen <= expected_hlen) ||
(pdu->hdr->common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
plen_error = true;
}
break;
case SPDK_NVME_TCP_PDU_TYPE_R2T:
expected_hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
if (pdu->hdr->common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
hd_len = SPDK_NVME_TCP_DIGEST_LEN;
}
if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
if (pdu->hdr->common.plen != (expected_hlen + hd_len)) {
plen_error = true;
}
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr->common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
goto err;
}
}
if (pdu->hdr.common.hlen != expected_hlen) {
if (pdu->hdr->common.hlen != expected_hlen) {
SPDK_ERRLOG("Expected PDU header length %u, got %u\n",
expected_hlen, pdu->hdr.common.hlen);
expected_hlen, pdu->hdr->common.hlen);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
goto err;
@ -943,7 +946,7 @@ nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
assert(tcp_req != NULL);
SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");
c2h_data = &pdu->hdr.c2h_data;
c2h_data = &pdu->hdr->c2h_data;
tcp_req->datao += pdu->data_len;
flags = c2h_data->common.flags;
@ -990,7 +993,7 @@ static void
nvme_tcp_c2h_term_req_payload_handle(struct nvme_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
nvme_tcp_c2h_term_req_dump(&pdu->hdr->term_req);
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
}
@ -1020,7 +1023,7 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
}
}
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
break;
@ -1047,7 +1050,7 @@ static void
nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr->ic_resp;
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
@ -1094,7 +1097,7 @@ nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
uint32_t *reaped)
{
struct nvme_tcp_req *tcp_req;
struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr->capsule_resp;
uint32_t cid, error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
struct spdk_nvme_cpl cpl;
@ -1135,7 +1138,7 @@ static void
nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr->term_req;
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
@ -1147,7 +1150,7 @@ nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
}
/* set the data buffer */
nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen,
nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr->raw + c2h_term_req->common.hlen,
c2h_term_req->common.plen - c2h_term_req->common.hlen);
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
return;
@ -1160,7 +1163,7 @@ static void
nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
struct nvme_tcp_req *tcp_req;
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr->c2h_data;
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
@ -1241,7 +1244,8 @@ spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
rsp_pdu = &tcp_req->send_pdu;
memset(rsp_pdu, 0, sizeof(*rsp_pdu));
h2c_data = &rsp_pdu->hdr.h2c_data;
rsp_pdu->hdr = &rsp_pdu->hdr_mem;
h2c_data = &rsp_pdu->hdr->h2c_data;
h2c_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
plen = h2c_data->common.hlen = sizeof(*h2c_data);
@ -1292,7 +1296,7 @@ static void
nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
{
struct nvme_tcp_req *tcp_req;
struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr->r2t;
uint32_t cid, error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
@ -1361,11 +1365,11 @@ nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
pdu = &tqpair->recv_pdu;
SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter: pdu type =%u\n", pdu->hdr->common.pdu_type);
/* check header digest if needed */
if (pdu->has_hdgst) {
crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr->raw + pdu->hdr->common.hlen, crc32c);
if (rc == 0) {
SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
@ -1375,7 +1379,7 @@ nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
}
}
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_IC_RESP:
nvme_tcp_icresp_handle(tqpair, pdu);
break;
@ -1394,7 +1398,7 @@ nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr->common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = 1;
nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
@ -1425,7 +1429,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
rc = nvme_tcp_read_data(tqpair->sock,
sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
(uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
(uint8_t *)&pdu->hdr->common + pdu->ch_valid_bytes);
if (rc < 0) {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
break;
@ -1444,7 +1448,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
pdu = &tqpair->recv_pdu;
rc = nvme_tcp_read_data(tqpair->sock,
pdu->psh_len - pdu->psh_valid_bytes,
(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
(uint8_t *)&pdu->hdr->raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
break;
@ -1467,7 +1471,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
data_len = pdu->data_len;
/* data digest */
if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
if (spdk_unlikely((pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
tqpair->host_ddgst_enable)) {
data_len += SPDK_NVME_TCP_DIGEST_LEN;
pdu->ddgst_enable = true;
@ -1587,7 +1591,8 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
pdu = &tqpair->send_pdu;
memset(&tqpair->send_pdu, 0, sizeof(tqpair->send_pdu));
ic_req = &pdu->hdr.ic_req;
pdu->hdr = &pdu->hdr_mem;
ic_req = &pdu->hdr->ic_req;
ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
ic_req->common.hlen = ic_req->common.plen = sizeof(*ic_req);
@ -1722,6 +1727,7 @@ nvme_tcp_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
tqpair->num_entries = qsize;
qpair = &tqpair->qpair;
tqpair->recv_pdu.hdr = &tqpair->recv_pdu.hdr_mem;
rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests);
if (rc != 0) {

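Note the pattern that repeats at every call site in this file: the PDU is zeroed (which clears the hdr pointer) and hdr is then re-aimed at hdr_mem before any header field is touched. A small helper could capture it; the sketch below is a hypothetical refactor, not something the commit adds:

#include <string.h>

/* Hypothetical helper: reset a PDU and restore the header pointer so
 * subsequent pdu->hdr->... accesses hit the embedded hdr_mem storage. */
static inline void
nvme_tcp_pdu_reset(struct nvme_tcp_pdu *pdu)
{
	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr = &pdu->hdr_mem;
}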

@ -328,6 +328,7 @@ spdk_nvmf_tcp_pdu_get(struct spdk_nvmf_tcp_qpair *tqpair)
TAILQ_REMOVE(&tqpair->free_queue, pdu, tailq);
memset(pdu, 0, sizeof(*pdu));
pdu->ref = 1;
pdu->hdr = &pdu->hdr_mem;
return pdu;
}
@ -418,7 +419,7 @@ spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
TAILQ_FOREACH_SAFE(pdu, &tqpair->send_queue, tailq, tmp_pdu) {
TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
/* Also check the pdu type, we need to calculte the c2h_data_pdu_cnt later */
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) {
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) {
assert(tqpair->c2h_data_pdu_cnt > 0);
tqpair->c2h_data_pdu_cnt--;
}
@ -826,7 +827,7 @@ spdk_nvmf_tcp_qpair_flush_pdus_internal(struct spdk_nvmf_tcp_qpair *tqpair)
*/
TAILQ_INIT(&completed_pdus_list);
while (bytes > 0) {
pdu_length = pdu->hdr.common.plen - pdu->writev_offset;
pdu_length = pdu->hdr->common.plen - pdu->writev_offset;
if (bytes >= pdu_length) {
bytes -= pdu_length;
TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
@ -898,10 +899,10 @@ spdk_nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
int hlen;
uint32_t crc32c;
hlen = pdu->hdr.common.hlen;
hlen = pdu->hdr->common.hlen;
enable_digest = 1;
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
/* this PDU should be sent without digest */
enable_digest = 0;
}
@ -909,7 +910,7 @@ spdk_nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
/* Header Digest */
if (enable_digest && tqpair->host_hdgst_enable) {
crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
MAKE_DIGEST_WORD((uint8_t *)pdu->hdr->raw + hlen, crc32c);
}
/* Data Digest */
@ -1053,6 +1054,7 @@ spdk_nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
tqpair->host_hdgst_enable = true;
tqpair->host_ddgst_enable = true;
tqpair->pdu_in_progress.hdr = &tqpair->pdu_in_progress.hdr_mem;
return 0;
}
@ -1256,6 +1258,7 @@ spdk_nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
case NVME_TCP_PDU_RECV_STATE_ERROR:
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
memset(&tqpair->pdu_in_progress, 0, sizeof(tqpair->pdu_in_progress));
tqpair->pdu_in_progress.hdr = &tqpair->pdu_in_progress.hdr_mem;
break;
default:
SPDK_ERRLOG("The state(%d) is invalid\n", state);
@ -1308,7 +1311,7 @@ spdk_nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_
return;
}
c2h_term_req = &rsp_pdu->hdr.term_req;
c2h_term_req = &rsp_pdu->hdr->term_req;
c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
c2h_term_req->common.hlen = c2h_term_req_hdr_len;
@ -1317,14 +1320,14 @@ spdk_nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_
DSET32(&c2h_term_req->fei, error_offset);
}
copy_len = pdu->hdr.common.hlen;
copy_len = pdu->hdr->common.hlen;
if (copy_len > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) {
copy_len = SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE;
}
/* Copy the error info into the buffer */
memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len);
nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len);
memcpy((uint8_t *)rsp_pdu->hdr->raw + c2h_term_req_hdr_len, pdu->hdr->raw, copy_len);
nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr->raw + c2h_term_req_hdr_len, copy_len);
/* Contain the header of the wrong received pdu */
c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len;
@ -1363,7 +1366,7 @@ spdk_nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransp
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
capsule_cmd = &pdu->hdr.capsule_cmd;
capsule_cmd = &pdu->hdr->capsule_cmd;
tcp_req = pdu->ctx;
assert(tcp_req != NULL);
if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) {
@ -1394,7 +1397,7 @@ spdk_nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
bool ttag_offset_error = false;
h2c_data = &pdu->hdr.h2c_data;
h2c_data = &pdu->hdr->h2c_data;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n",
tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag);
@ -1482,7 +1485,7 @@ spdk_nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req,
return;
}
capsule_resp = &rsp_pdu->hdr.capsule_resp;
capsule_resp = &rsp_pdu->hdr->capsule_resp;
capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp);
capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl;
@ -1530,7 +1533,7 @@ spdk_nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
return;
}
r2t = &rsp_pdu->hdr.r2t;
r2t = &rsp_pdu->hdr->r2t;
r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
r2t->common.plen = r2t->common.hlen = sizeof(*r2t);
@ -1594,7 +1597,7 @@ static void
spdk_nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr->term_req;
uint32_t error_offset = 0;
enum spdk_nvme_tcp_term_req_fes fes;
@ -1607,7 +1610,7 @@ spdk_nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair,
}
/* set the data buffer */
nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen,
nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr->raw + h2c_term_req->common.hlen,
h2c_term_req->common.plen - h2c_term_req->common.hlen);
spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
return;
@ -1620,7 +1623,7 @@ static void
spdk_nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr->term_req;
spdk_nvmf_tcp_h2c_term_req_dump(h2c_term_req);
spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
@ -1654,7 +1657,7 @@ spdk_nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair)
}
ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport);
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
spdk_nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu);
break;
@ -1686,7 +1689,7 @@ spdk_nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
struct spdk_nvmf_tcp_qpair *tqpair,
struct nvme_tcp_pdu *pdu)
{
struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req;
struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr->ic_req;
struct nvme_tcp_pdu *rsp_pdu;
struct spdk_nvme_tcp_ic_resp *ic_resp;
uint32_t error_offset = 0;
@ -1716,7 +1719,7 @@ spdk_nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
return;
}
ic_resp = &rsp_pdu->hdr.ic_resp;
ic_resp = &rsp_pdu->hdr->ic_resp;
ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
ic_resp->common.hlen = ic_resp->common.plen = sizeof(*ic_resp);
ic_resp->pfv = 0;
@ -1751,12 +1754,12 @@ spdk_nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair)
pdu = &tqpair->pdu_in_progress;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "pdu type of tqpair(%p) is %d\n", tqpair,
pdu->hdr.common.pdu_type);
pdu->hdr->common.pdu_type);
/* check header digest if needed */
if (pdu->has_hdgst) {
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Compare the header of pdu=%p on tqpair=%p\n", pdu, tqpair);
crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr->raw + pdu->hdr->common.hlen, crc32c);
if (rc == 0) {
SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
@ -1767,7 +1770,7 @@ spdk_nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair)
}
ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport);
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_IC_REQ:
spdk_nvmf_tcp_icreq_handle(ttransport, tqpair, pdu);
break;
@ -1783,7 +1786,7 @@ spdk_nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair)
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress.hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress.hdr->common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = 1;
spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
@ -1803,14 +1806,14 @@ spdk_nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
pdu = &tqpair->pdu_in_progress;
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu);
fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
goto err;
}
expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req);
if (pdu->hdr.common.plen != expected_hlen) {
if (pdu->hdr->common.plen != expected_hlen) {
plen_error = true;
}
} else {
@ -1820,51 +1823,51 @@ spdk_nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
goto err;
}
switch (pdu->hdr.common.pdu_type) {
switch (pdu->hdr->common.pdu_type) {
case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
expected_hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdo = pdu->hdr.common.pdo;
pdo = pdu->hdr->common.pdo;
if ((tqpair->cpda != 0) && (pdo != ((tqpair->cpda + 1) << 2))) {
pdo_error = true;
break;
}
if (pdu->hdr.common.plen < expected_hlen) {
if (pdu->hdr->common.plen < expected_hlen) {
plen_error = true;
}
break;
case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
pdo = pdu->hdr.common.pdo;
pdo = pdu->hdr->common.pdo;
if ((tqpair->cpda != 0) && (pdo != ((tqpair->cpda + 1) << 2))) {
pdo_error = true;
break;
}
if (pdu->hdr.common.plen < expected_hlen) {
if (pdu->hdr->common.plen < expected_hlen) {
plen_error = true;
}
break;
case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
if ((pdu->hdr.common.plen <= expected_hlen) ||
(pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
if ((pdu->hdr->common.plen <= expected_hlen) ||
(pdu->hdr->common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
plen_error = true;
}
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr->common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
goto err;
}
}
if (pdu->hdr.common.hlen != expected_hlen) {
if (pdu->hdr->common.hlen != expected_hlen) {
SPDK_ERRLOG("PDU type=0x%02x, Expected ICReq header length %u, got %u on tqpair=%p\n",
pdu->hdr.common.pdu_type,
expected_hlen, pdu->hdr.common.hlen, tqpair);
pdu->hdr->common.pdu_type,
expected_hlen, pdu->hdr->common.hlen, tqpair);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
goto err;
@ -1921,7 +1924,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
rc = nvme_tcp_read_data(tqpair->sock,
sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
(void *)&pdu->hdr.common + pdu->ch_valid_bytes);
(void *)&pdu->hdr->common + pdu->ch_valid_bytes);
if (rc < 0) {
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconnect tqpair=%p\n", tqpair);
return NVME_TCP_PDU_FATAL;
@ -1944,7 +1947,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
rc = nvme_tcp_read_data(tqpair->sock,
pdu->psh_len - pdu->psh_valid_bytes,
(void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
(void *)&pdu->hdr->raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
return NVME_TCP_PDU_FATAL;
} else if (rc > 0) {
@ -1970,7 +1973,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
data_len = pdu->data_len;
/* data digest */
if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
if (spdk_unlikely((pdu->hdr->common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
tqpair->host_ddgst_enable)) {
data_len += SPDK_NVME_TCP_DIGEST_LEN;
pdu->ddgst_enable = true;
@ -1999,7 +2002,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
break;
case NVME_TCP_PDU_RECV_STATE_ERROR:
/* Check whether the connection is closed. Each time, we only read 1 byte every time */
rc = nvme_tcp_read_data(tqpair->sock, 1, (void *)&pdu->hdr.common);
rc = nvme_tcp_read_data(tqpair->sock, 1, (void *)&pdu->hdr->common);
if (rc < 0) {
return NVME_TCP_PDU_FATAL;
}
@ -2295,7 +2298,7 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
rsp_pdu = spdk_nvmf_tcp_pdu_get(tqpair);
assert(rsp_pdu != NULL);
c2h_data = &rsp_pdu->hdr.c2h_data;
c2h_data = &rsp_pdu->hdr->c2h_data;
c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
plen = c2h_data->common.hlen = sizeof(*c2h_data);
@ -2460,13 +2463,13 @@ spdk_nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
uint32_t plen = 0;
pdu = &tqpair->pdu_in_progress;
plen = pdu->hdr.common.hlen;
plen = pdu->hdr->common.hlen;
if (tqpair->host_hdgst_enable) {
plen += SPDK_NVME_TCP_DIGEST_LEN;
}
if (pdu->hdr.common.plen != plen) {
if (pdu->hdr->common.plen != plen) {
tcp_req->has_incapsule_data = true;
}
}
@ -2502,7 +2505,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, 0);
/* copy the cmd from the receive pdu */
tcp_req->cmd = tqpair->pdu_in_progress.hdr.capsule_cmd.ccsqe;
tcp_req->cmd = tqpair->pdu_in_progress.hdr->capsule_cmd.ccsqe;
if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->dif_ctx))) {
tcp_req->dif_insert_or_strip = true;


@ -59,6 +59,7 @@ test_nvme_tcp_pdu_set_data_buf(void)
uint32_t data_len;
uint64_t i;
pdu.hdr = &pdu.hdr_mem;
/* 1st case: input is a single SGL entry. */
iov[0].iov_base = (void *)0xDEADBEEF;
iov[0].iov_len = 4096;
@ -126,9 +127,10 @@ test_nvme_tcp_build_iovs(void)
uint32_t mapped_length = 0;
int rc;
pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 4096 * 2 +
pdu.hdr = &pdu.hdr_mem;
pdu.hdr->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr->common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr->common.plen = pdu.hdr->common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 4096 * 2 +
SPDK_NVME_TCP_DIGEST_LEN;
pdu.data_len = 4096 * 2;
pdu.padding_len = 0;
@ -139,7 +141,7 @@ test_nvme_tcp_build_iovs(void)
rc = nvme_tcp_build_iovs(iovs, 4, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 3);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == 4096 * 2);
@ -175,7 +177,7 @@ test_nvme_tcp_build_iovs(void)
rc = nvme_tcp_build_iovs(iovs, 2, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 2);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == 4096 * 2);
@ -299,6 +301,7 @@ test_nvme_tcp_pdu_set_data_buf_with_md(void)
struct spdk_dif_ctx dif_ctx = {};
int rc;
pdu.hdr = &pdu.hdr_mem;
pdu.dif_ctx = &dif_ctx;
rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
@ -395,15 +398,16 @@ test_nvme_tcp_build_iovs_with_md(void)
uint32_t mapped_length = 0;
int rc;
pdu.hdr = &pdu.hdr_mem;
rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
0, 0, 0, 0, 0);
CU_ASSERT(rc == 0);
pdu.dif_ctx = &dif_ctx;
pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
pdu.hdr->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr->common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr->common.plen = pdu.hdr->common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
SPDK_NVME_TCP_DIGEST_LEN;
pdu.data_len = 512 * 8;
pdu.padding_len = 0;
@ -416,7 +420,7 @@ test_nvme_tcp_build_iovs_with_md(void)
rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 10);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == 512);


@ -406,6 +406,7 @@ test_nvmf_tcp_send_c2h_data(void)
struct nvme_tcp_pdu pdu = {};
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
pdu.hdr = &pdu.hdr_mem;
thread = spdk_thread_create(NULL, NULL);
SPDK_CU_ASSERT_FATAL(thread != NULL);
spdk_set_thread(thread);
@ -447,7 +448,7 @@ test_nvmf_tcp_send_c2h_data(void)
TAILQ_INSERT_TAIL(&tqpair.free_queue, &pdu, tailq);
tqpair.free_pdu_num++;
c2h_data = &pdu.hdr.c2h_data;
c2h_data = &pdu.hdr->c2h_data;
CU_ASSERT(c2h_data->datao == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
CU_ASSERT(c2h_data->datal = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
@ -470,7 +471,7 @@ test_nvmf_tcp_send_c2h_data(void)
TAILQ_INSERT_TAIL(&tqpair.free_queue, &pdu, tailq);
tqpair.free_pdu_num++;
c2h_data = &pdu.hdr.c2h_data;
c2h_data = &pdu.hdr->c2h_data;
CU_ASSERT(c2h_data->datao == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 3);
CU_ASSERT(c2h_data->datal = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
@ -492,7 +493,7 @@ test_nvmf_tcp_send_c2h_data(void)
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue));
c2h_data = &pdu.hdr.c2h_data;
c2h_data = &pdu.hdr->c2h_data;
CU_ASSERT(c2h_data->datao == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 5);
CU_ASSERT(c2h_data->datal = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
@ -521,6 +522,7 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
struct spdk_nvmf_tcp_req tcp_req = {};
struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
pdu.hdr = &pdu.hdr_mem;
TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
tqpair.maxh2cdata = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE;
@ -544,7 +546,7 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
&tcp_req, state_link);
h2c_data = &pdu.hdr.h2c_data;
h2c_data = &pdu.hdr->h2c_data;
h2c_data->cccid = 1;
h2c_data->ttag = 2;
h2c_data->datao = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE * 2;