nvme/tcp: Also allocate the control-related PDU from SPDK DMA memory

Purpose: Make the management of this PDU consistent with the other PDUs, so that
the code can more easily be adapted to hardware offloading solutions.

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: Ic4a2847fd1b6cacda4cbaa52ff12c338f0394805
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3588
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ziye Yang, 2020-07-30 19:43:51 +08:00; committed by Jim Harris
Parent: cca62c633f
Commit: 7bac9b06f1
2 changed files with 16 additions and 11 deletions

@@ -81,7 +81,7 @@ struct nvme_tcp_qpair {
 	TAILQ_HEAD(, nvme_tcp_pdu) send_queue;
 	struct nvme_tcp_pdu recv_pdu;
-	struct nvme_tcp_pdu send_pdu; /* only for error pdu and init pdu */
+	struct nvme_tcp_pdu *send_pdu; /* only for error pdu and init pdu */
 	struct nvme_tcp_pdu *send_pdus; /* Used by tcp_reqs */
 	enum nvme_tcp_pdu_recv_state recv_state;
@@ -241,7 +241,8 @@ nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
 		goto fail;
 	}
-	tqpair->send_pdus = spdk_zmalloc(tqpair->num_entries * sizeof(struct nvme_tcp_pdu),
+	/* Add one additional member for the send_pdu owned by the tqpair */
+	tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 1) * sizeof(struct nvme_tcp_pdu),
 					 0x1000, NULL,
 					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
@@ -261,6 +262,8 @@ nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
 		TAILQ_INSERT_TAIL(&tqpair->free_reqs, tcp_req, link);
 	}
+	tqpair->send_pdu = &tqpair->send_pdus[i];
 	return 0;
 fail:
 	nvme_tcp_free_reqs(tqpair);
@@ -688,7 +691,7 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
 	uint32_t h2c_term_req_hdr_len = sizeof(*h2c_term_req);
 	uint8_t copy_len;
-	rsp_pdu = &tqpair->send_pdu;
+	rsp_pdu = tqpair->send_pdu;
 	memset(rsp_pdu, 0, sizeof(*rsp_pdu));
 	h2c_term_req = &rsp_pdu->hdr.term_req;
 	h2c_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
@@ -1539,8 +1542,8 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 	uint64_t icreq_timeout_tsc;
 	int rc;
-	pdu = &tqpair->send_pdu;
-	memset(&tqpair->send_pdu, 0, sizeof(tqpair->send_pdu));
+	pdu = tqpair->send_pdu;
+	memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
 	ic_req = &pdu->hdr.ic_req;
 	ic_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
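
On the initiator (nvme) side, the idea is simply to allocate one extra PDU at the end of the DMA-able send_pdus array and point tqpair->send_pdu at it. Below is a minimal sketch of that pattern; my_pdu, my_qpair, and alloc_send_pdus are simplified stand-ins for illustration, not the real SPDK definitions.

/* Sketch only: stand-in types instead of the real nvme_tcp_pdu/nvme_tcp_qpair. */
#include <errno.h>
#include <stdint.h>
#include "spdk/env.h"

struct my_pdu {
	uint8_t raw[4096];	/* placeholder payload */
};

struct my_qpair {
	uint16_t	num_entries;	/* queue depth */
	struct my_pdu	*send_pdus;	/* one PDU per request */
	struct my_pdu	*send_pdu;	/* qpair-owned control PDU (ICReq, H2C term req) */
};

static int
alloc_send_pdus(struct my_qpair *tqpair)
{
	/* Allocate num_entries + 1 PDUs from DMA-able memory; the extra entry
	 * at the end becomes the control PDU, so every PDU the qpair sends
	 * comes from the same DMA-friendly pool as the per-request PDUs. */
	tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 1) * sizeof(struct my_pdu),
					 0x1000, NULL,
					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!tqpair->send_pdus) {
		return -ENOMEM;
	}

	tqpair->send_pdu = &tqpair->send_pdus[tqpair->num_entries];
	return 0;
}

The target (nvmf) side changes below follow the same approach.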

@@ -215,7 +215,7 @@ struct spdk_nvmf_tcp_qpair {
 	/* This is a spare PDU used for sending special management
 	 * operations. Primarily, this is used for the initial
 	 * connection response and c2h termination request. */
-	struct nvme_tcp_pdu mgmt_pdu;
+	struct nvme_tcp_pdu *mgmt_pdu;
 	TAILQ_HEAD(, nvme_tcp_pdu) send_queue;
@@ -779,8 +779,6 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
 	tqpair->resource_count = opts->max_queue_depth;
-	tqpair->mgmt_pdu.qpair = tqpair;
 	tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs));
 	if (!tqpair->reqs) {
 		SPDK_ERRLOG("Unable to allocate reqs on tqpair=%p\n", tqpair);
@@ -797,7 +795,8 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
 		}
 	}
-	tqpair->pdus = spdk_dma_malloc(tqpair->resource_count * sizeof(*tqpair->pdus), 0x1000, NULL);
+	/* Add one additional member, which will be used for the mgmt_pdu owned by the tqpair */
+	tqpair->pdus = spdk_dma_malloc((tqpair->resource_count + 1) * sizeof(*tqpair->pdus), 0x1000, NULL);
 	if (!tqpair->pdus) {
 		SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair);
 		return -1;
@@ -827,6 +826,9 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
 		tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++;
 	}
+	tqpair->mgmt_pdu = &tqpair->pdus[i];
+	tqpair->mgmt_pdu->qpair = tqpair;
 	tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
 				 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
@@ -1082,7 +1084,7 @@ nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_p
 	uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req);
 	uint32_t copy_len;
-	rsp_pdu = &tqpair->mgmt_pdu;
+	rsp_pdu = tqpair->mgmt_pdu;
 	c2h_term_req = &rsp_pdu->hdr.term_req;
 	c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
@@ -1521,7 +1523,7 @@ nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
 	tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX);
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda);
-	rsp_pdu = &tqpair->mgmt_pdu;
+	rsp_pdu = tqpair->mgmt_pdu;
 	ic_resp = &rsp_pdu->hdr.ic_resp;
 	ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
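
On the target (nvmf) side the pool is sized resource_count + 1 with spdk_dma_malloc(), and the last entry is reserved for the qpair's management PDU (ICResp, C2H termination request). A hedged sketch of the same pattern, again with stand-in types (my_pdu, my_nvmf_qpair, alloc_pdus are illustrative names only):

/* Sketch only: stand-in types instead of the real spdk_nvmf_tcp_qpair. */
#include <errno.h>
#include <stdint.h>
#include "spdk/env.h"

struct my_pdu {
	uint8_t raw[4096];	/* placeholder payload */
};

struct my_nvmf_qpair {
	uint32_t	resource_count;	/* max queue depth */
	struct my_pdu	*pdus;		/* one PDU per request */
	struct my_pdu	*mgmt_pdu;	/* spare PDU for ICResp / C2H term req */
};

static int
alloc_pdus(struct my_nvmf_qpair *tqpair)
{
	/* resource_count + 1 PDUs from DMA-able memory; the final entry is
	 * the management PDU owned by the qpair itself. */
	tqpair->pdus = spdk_dma_malloc((tqpair->resource_count + 1) * sizeof(*tqpair->pdus),
				       0x1000, NULL);
	if (!tqpair->pdus) {
		return -ENOMEM;
	}

	tqpair->mgmt_pdu = &tqpair->pdus[tqpair->resource_count];
	return 0;
}

As the commit message notes, the benefit is that the control/management PDU now lives in the same DMA-friendly memory as the data-path PDUs, which matters once PDU descriptors are handed to a hardware offload.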