nvmf/tcp: Move the accel_engine into the poll group.

With this change, each poll group uses a single accel_engine
channel that is shared by all of its qpairs, which makes better
use of the underlying acceleration device.
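
For illustration, a minimal self-contained C sketch of the lifecycle
this commit establishes; the example_* names are hypothetical
stand-ins for spdk_io_channel, spdk_accel_engine_get_io_channel()
and spdk_put_io_channel(), and the real changes are in the diff
below:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the SPDK channel type and calls. */
struct example_io_channel { int unused; };

static struct example_io_channel *
example_accel_get_io_channel(void)
{
        return calloc(1, sizeof(struct example_io_channel));
}

static void
example_put_io_channel(struct example_io_channel *ch)
{
        free(ch);
}

/* The accel channel now lives in the poll group, not in each qpair. */
struct example_poll_group {
        struct example_io_channel *accel_channel;
};

struct example_qpair {
        struct example_poll_group *group;
};

static int
example_poll_group_create(struct example_poll_group *pg)
{
        /* One channel per poll group, acquired when the group is created. */
        pg->accel_channel = example_accel_get_io_channel();
        return pg->accel_channel ? 0 : -1;
}

static void
example_poll_group_destroy(struct example_poll_group *pg)
{
        /* Released once, when the group goes away. */
        if (pg->accel_channel) {
                example_put_io_channel(pg->accel_channel);
        }
}

static struct example_io_channel *
example_qpair_digest_channel(struct example_qpair *qp)
{
        /* Every qpair submits CRC32C work through its group's channel,
         * so no per-qpair channel has to be created in the ICReq handler. */
        return qp->group->accel_channel;
}

int
main(void)
{
        struct example_poll_group pg = {0};
        struct example_qpair qp = { .group = &pg };

        if (example_poll_group_create(&pg) == 0) {
                printf("digest channel %p\n", (void *)example_qpair_digest_channel(&qp));
                example_poll_group_destroy(&pg);
        }
        return 0;
}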

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: Ibab183a1f65baff7e58529ee05e96b1b04731285
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7055
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Ziye Yang 2021-03-26 01:11:48 +08:00 committed by Tomasz Zawadzki
parent 85d48c64a9
commit f0956d33fb
2 changed files with 42 additions and 24 deletions


@@ -252,7 +252,6 @@ struct spdk_nvmf_tcp_qpair {
*/
struct spdk_poller *timeout_poller;
struct spdk_io_channel *accel_channel;
TAILQ_ENTRY(spdk_nvmf_tcp_qpair) link;
};
@@ -273,6 +272,7 @@ struct spdk_nvmf_tcp_poll_group {
TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs;
TAILQ_HEAD(, spdk_nvmf_tcp_qpair) await_req;
struct spdk_io_channel *accel_channel;
struct spdk_nvmf_tcp_control_msg_list *control_msg_list;
};
@@ -484,9 +484,6 @@ nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
nvmf_tcp_dump_qpair_req_contents(tqpair);
}
if (tqpair->accel_channel) {
spdk_put_io_channel(tqpair->accel_channel);
}
spdk_dma_free(tqpair->pdus);
free(tqpair->reqs);
spdk_free(tqpair->bufs);
@@ -861,8 +858,9 @@ pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
/* Data Digest */
if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) {
/* Only support this limited case for the first step */
if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0))) {
spdk_accel_submit_crc32cv(tqpair->accel_channel, &pdu->data_digest_crc32,
if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
&& tqpair->group)) {
spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32,
pdu->data_iov, pdu->data_iovcnt, 0, data_crc32_accel_done, pdu);
return;
}
@@ -909,8 +907,8 @@ nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
pdu->iov[0].iov_len = hlen;
/* Header Digest */
if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable) {
spdk_accel_submit_crc32cv(tqpair->accel_channel, &pdu->header_digest_crc32,
if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable && tqpair->group) {
spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->header_digest_crc32,
pdu->iov, 1, 0, header_crc32_accel_done, pdu);
return;
}
@@ -1184,6 +1182,12 @@ nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
}
}
tgroup->accel_channel = spdk_accel_engine_get_io_channel();
if (spdk_unlikely(!tgroup->accel_channel)) {
SPDK_ERRLOG("Cannot create accel_channel for tgroup=%p\n", tgroup);
goto cleanup;
}
return &tgroup->group;
cleanup:
@@ -1218,6 +1222,10 @@ nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
nvmf_tcp_control_msg_list_free(tgroup->control_msg_list);
}
if (tgroup->accel_channel) {
spdk_put_io_channel(tgroup->accel_channel);
}
free(tgroup);
}
@@ -1737,17 +1745,6 @@ nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
/* Not fatal. */
}
if (tqpair->host_hdgst_enable || tqpair->host_ddgst_enable) {
tqpair->accel_channel = spdk_accel_engine_get_io_channel();
if (spdk_unlikely(!tqpair->accel_channel)) {
fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
error_offset = offsetof(struct spdk_nvme_tcp_ic_req, dgst);
SPDK_ERRLOG("Unabled to get accel_channel for tqpair=%p, failed to enable digest for header or data\n",
tqpair);
goto end;
}
}
tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX);
SPDK_DEBUGLOG(nvmf_tcp, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda);


@@ -55,6 +55,8 @@
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128
static void *g_accel_p = (void *)0xdeadbeaf;
SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
@@ -222,10 +224,14 @@ DEFINE_STUB(nvmf_transport_req_free,
(struct spdk_nvmf_request *req),
0);
DEFINE_STUB(spdk_accel_engine_get_io_channel,
struct spdk_io_channel *,
(void),
NULL);
DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
return spdk_get_io_channel(g_accel_p);
}
DEFINE_STUB(spdk_accel_submit_crc32cv,
int,
@@ -476,6 +482,19 @@ test_nvmf_tcp_destroy(void)
spdk_thread_destroy(thread);
}
static void
init_accel(void)
{
spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
sizeof(int), "accel_p");
}
static void
fini_accel(void)
{
spdk_io_device_unregister(g_accel_p, NULL);
}
static void
test_nvmf_tcp_poll_group_create(void)
{
@@ -490,6 +509,8 @@ test_nvmf_tcp_poll_group_create(void)
SPDK_CU_ASSERT_FATAL(thread != NULL);
spdk_set_thread(thread);
init_accel();
memset(&opts, 0, sizeof(opts));
opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
@@ -513,6 +534,7 @@ test_nvmf_tcp_poll_group_create(void)
nvmf_tcp_poll_group_destroy(group);
nvmf_tcp_destroy(transport, NULL, NULL);
fini_accel();
spdk_thread_exit(thread);
while (!spdk_thread_is_exited(thread)) {
spdk_thread_poll(thread, 0, 0);
@@ -721,7 +743,6 @@ test_nvmf_tcp_incapsule_data_handle(void)
CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;