/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_sock.c"
#include "nvme/nvme_tcp.c"
#include "common/lib/nvme/common_stubs.h"
SPDK_LOG_REGISTER_COMPONENT(nvme)
DEFINE_STUB(nvme_qpair_submit_request,
int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);

DEFINE_STUB(spdk_sock_set_priority,
int, (struct spdk_sock *sock, int priority), 0);

DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
int,
(struct spdk_sock *sock, struct spdk_sock_group **group),
0);

DEFINE_STUB(spdk_sock_group_get_ctx,
void *,
(struct spdk_sock_group *group),
NULL);

DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);

static void
test_nvme_tcp_pdu_set_data_buf(void)
{
struct nvme_tcp_pdu pdu = {};
struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
uint32_t data_len;
uint64_t i;
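/* nvme_tcp_pdu_set_data_buf(pdu, iov, iovcnt, data_offset, data_len) is
 * expected to map the byte range [data_offset, data_offset + data_len)
 * of the source SGL onto pdu->data_iov. */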
/* 1st case: input is a single SGL entry. */
iov[0].iov_base = (void *)0xDEADBEEF;
iov[0].iov_len = 4096;
nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
CU_ASSERT(pdu.data_iovcnt == 1);
CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
CU_ASSERT(pdu.data_iov[0].iov_len == 512);
/* 2nd case: simulate split on multiple SGL entries. */
iov[0].iov_base = (void *)0xDEADBEEF;
iov[0].iov_len = 4096;
iov[1].iov_base = (void *)0xFEEDBEEF;
iov[1].iov_len = 512 * 7;
iov[2].iov_base = (void *)0xF00DF00D;
iov[2].iov_len = 4096 * 2;
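/* The three calls below walk the 4096 + 3584 + 8192 = 15872-byte SGL in
 * successive chunks of 2048, 3584, and 10240 bytes, so each mapping
 * starts where the previous one ended. */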
nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
CU_ASSERT(pdu.data_iovcnt == 1);
CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
CU_ASSERT(pdu.data_iovcnt == 2);
CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
CU_ASSERT(pdu.data_iovcnt == 2);
CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
* entries.
*/
data_len = 0;
for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
iov[i].iov_base = (void *)(0xDEADBEEF + i);
iov[i].iov_len = 512 * (i + 1);
data_len += 512 * (i + 1);
}
nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
}
}
static void
test_nvme_tcp_build_iovs(void)
{
const uintptr_t pdu_iov_len = 4096;
struct nvme_tcp_pdu pdu = {};
struct iovec iovs[5] = {};
uint32_t mapped_length = 0;
int rc;
pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
SPDK_NVME_TCP_DIGEST_LEN;
pdu.data_len = pdu_iov_len * 2;
pdu.padding_len = 0;
pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
pdu.data_iov[0].iov_len = pdu_iov_len;
pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
pdu.data_iov[1].iov_len = pdu_iov_len;
pdu.data_iovcnt = 2;
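/* With both digests enabled, the expected layout is: iovs[0] = header +
 * header digest, iovs[1..2] = the two data buffers, iovs[3] = data digest. */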
rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 4);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
/* Add a new data_iov entry, update pdu iov count and data length */
pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
pdu.data_iov[2].iov_len = 123;
pdu.data_iovcnt = 3;
pdu.data_len += 123;
pdu.hdr.common.plen += 123;
rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 5);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
CU_ASSERT(iovs[3].iov_len == 123);
CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
}
struct nvme_tcp_ut_bdev_io {
struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
int iovpos;
};
/* Essentially a simplification of bdev_nvme_reset_sgl() and bdev_nvme_next_sge(). */
static void
nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
struct nvme_tcp_ut_bdev_io *bio = cb_arg;
struct iovec *iov;
for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
iov = &bio->iovs[bio->iovpos];
/* The offset must land exactly on an SGL entry boundary; skip whole entries until it does. */
if (offset == 0) {
break;
}
SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
offset -= iov->iov_len;
}
SPDK_CU_ASSERT_FATAL(offset == 0);
SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
}
static int
nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
struct nvme_tcp_ut_bdev_io *bio = cb_arg;
struct iovec *iov;
SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
iov = &bio->iovs[bio->iovpos];
*address = iov->iov_base;
*length = iov->iov_len;
bio->iovpos++;
return 0;
}
static void
test_nvme_tcp_build_sgl_request(void)
{
struct nvme_tcp_qpair tqpair;
struct spdk_nvme_ctrlr ctrlr = {0};
struct nvme_tcp_req tcp_req = {0};
struct nvme_request req = {{0}};
struct nvme_tcp_ut_bdev_io bio;
uint64_t i;
int rc;
ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
tqpair.qpair.ctrlr = &ctrlr;
tcp_req.req = &req;
req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
req.payload.contig_or_cb_arg = &bio;
req.qpair = &tqpair.qpair;
for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
bio.iovs[i].iov_len = 0;
}
/* Test case 1: Single SGL. Expected: PASS */
bio.iovpos = 0;
req.payload_offset = 0;
req.payload_size = 0x1000;
bio.iovs[0].iov_len = 0x1000;
rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
SPDK_CU_ASSERT_FATAL(rc == 0);
CU_ASSERT(bio.iovpos == 1);
CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
CU_ASSERT(tcp_req.iovcnt == 1);
/* Test case 2: Multiple SGL. Expected: PASS */
bio.iovpos = 0;
req.payload_offset = 0;
req.payload_size = 0x4000;
for (i = 0; i < 4; i++) {
bio.iovs[i].iov_len = 0x1000;
}
rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
SPDK_CU_ASSERT_FATAL(rc == 0);
CU_ASSERT(bio.iovpos == 4);
CU_ASSERT(tcp_req.iovcnt == 4);
for (i = 0; i < 4; i++) {
CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
}
/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
bio.iovpos = 0;
req.payload_offset = 0;
req.payload_size = 0x17000;
for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
bio.iovs[i].iov_len = 0x1000;
}
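/* 0x17000 bytes cannot be described by NVME_TCP_MAX_SGL_DESCRIPTORS
 * iovecs of 0x1000 bytes each, so building the request must fail. */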
rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
SPDK_CU_ASSERT_FATAL(rc != 0);
CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
}
}
static void
test_nvme_tcp_pdu_set_data_buf_with_md(void)
{
struct nvme_tcp_pdu pdu = {};
struct iovec iovs[7] = {};
struct spdk_dif_ctx dif_ctx = {};
int rc;
pdu.dif_ctx = &dif_ctx;
rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
0, 0, 0, 0, 0);
CU_ASSERT(rc == 0);
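/* 520-byte extended blocks: 512 bytes of data followed by 8 bytes of
 * metadata. The offsets and lengths passed in below are in terms of the
 * 512-byte data stream, while the resulting data_iov entries point into
 * the extended buffer, so each mapped length grows by 8 bytes for every
 * metadata region it spans (including the trailing one when the range
 * ends exactly on a block boundary). */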
/* Single iovec case */
iovs[0].iov_base = (void *)0xDEADBEEF;
iovs[0].iov_len = 2080;
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
CU_ASSERT(dif_ctx.data_offset == 0);
CU_ASSERT(pdu.data_len == 500);
CU_ASSERT(pdu.data_iovcnt == 1);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(pdu.data_iov[0].iov_len == 500);
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
CU_ASSERT(dif_ctx.data_offset == 500);
CU_ASSERT(pdu.data_len == 1000);
CU_ASSERT(pdu.data_iovcnt == 1);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
CU_ASSERT(dif_ctx.data_offset == 1500);
CU_ASSERT(pdu.data_len == 548);
CU_ASSERT(pdu.data_iovcnt == 1);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
CU_ASSERT(pdu.data_iov[0].iov_len == 564);
/* Multiple iovecs case */
iovs[0].iov_base = (void *)0xDEADBEEF;
iovs[0].iov_len = 256;
iovs[1].iov_base = (void *)(0xDEADBEEF + 0x1000);
iovs[1].iov_len = 256 + 1;
iovs[2].iov_base = (void *)(0xDEADBEEF + 0x2000);
iovs[2].iov_len = 4;
iovs[3].iov_base = (void *)(0xDEADBEEF + 0x3000);
iovs[3].iov_len = 3 + 123;
iovs[4].iov_base = (void *)(0xDEADBEEF + 0x4000);
iovs[4].iov_len = 389 + 6;
iovs[5].iov_base = (void *)(0xDEADBEEF + 0x5000);
iovs[5].iov_len = 2 + 512 + 8 + 432;
iovs[6].iov_base = (void *)(0xDEADBEEF + 0x6000);
iovs[6].iov_len = 80 + 8;
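/* The seven iovecs above hold the same 2080 bytes (four 520-byte
 * extended blocks) as the single-iovec case, just split at arbitrary
 * boundaries; the same three data ranges are mapped again below. */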
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
CU_ASSERT(dif_ctx.data_offset == 0);
CU_ASSERT(pdu.data_len == 500);
CU_ASSERT(pdu.data_iovcnt == 2);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(pdu.data_iov[0].iov_len == 256);
CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
CU_ASSERT(pdu.data_iov[1].iov_len == 244);
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
CU_ASSERT(dif_ctx.data_offset == 500);
CU_ASSERT(pdu.data_len == 1000);
CU_ASSERT(pdu.data_iovcnt == 5);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
CU_ASSERT(pdu.data_iov[0].iov_len == 13);
CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
CU_ASSERT(pdu.data_iov[1].iov_len == 4);
CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
CU_ASSERT(pdu.data_iov[3].iov_len == 395);
CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
CU_ASSERT(pdu.data_iov[4].iov_len == 478);
nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
CU_ASSERT(dif_ctx.data_offset == 1500);
CU_ASSERT(pdu.data_len == 548);
CU_ASSERT(pdu.data_iovcnt == 2);
CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
CU_ASSERT(pdu.data_iov[0].iov_len == 476);
CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
CU_ASSERT(pdu.data_iov[1].iov_len == 88);
}
static void
test_nvme_tcp_build_iovs_with_md(void)
{
struct nvme_tcp_pdu pdu = {};
struct iovec iovs[11] = {};
struct spdk_dif_ctx dif_ctx = {};
uint32_t mapped_length = 0;
int rc;
rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
0, 0, 0, 0, 0);
CU_ASSERT(rc == 0);
pdu.dif_ctx = &dif_ctx;
pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
SPDK_NVME_TCP_DIGEST_LEN;
pdu.data_len = 512 * 8;
pdu.padding_len = 0;
pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
pdu.data_iov[0].iov_len = (512 + 8) * 8;
pdu.data_iovcnt = 1;
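/* A single contiguous extended-LBA buffer of 8 * 520 bytes; build_iovs
 * is expected to emit eight 512-byte data iovecs, skipping the 8 bytes
 * of metadata that follow each data block. */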
rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
CU_ASSERT(rc == 10);
CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
CU_ASSERT(iovs[1].iov_len == 512);
CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
CU_ASSERT(iovs[2].iov_len == 512);
CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
CU_ASSERT(iovs[3].iov_len == 512);
CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
CU_ASSERT(iovs[4].iov_len == 512);
CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
CU_ASSERT(iovs[5].iov_len == 512);
CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
CU_ASSERT(iovs[6].iov_len == 512);
CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
CU_ASSERT(iovs[7].iov_len == 512);
CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
CU_ASSERT(iovs[8].iov_len == 512);
CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
}
/* Completion callback stub; the test only needs a valid function pointer. */
static void
ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
{
}
static void
test_nvme_tcp_req_complete_safe(void)
{
bool rc;
struct nvme_tcp_req tcp_req = {0};
struct nvme_request req = {{0}};
struct nvme_tcp_qpair tqpair = {{0}};
tcp_req.req = &req;
tcp_req.req->qpair = &tqpair.qpair;
tcp_req.req->cb_fn = ut_nvme_complete_request;
tcp_req.tqpair = &tqpair;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
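/* nvme_tcp_req_complete_safe() completes a request only if it is still
 * ACTIVE and both send_ack and data_recv are set; completions requested
 * outside of the qpair's completion context are counted in
 * async_complete (see test case 4). */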
/* Test case 1: send operation and transfer completed. Expect: PASS */
tcp_req.state = NVME_TCP_REQ_ACTIVE;
tcp_req.ordering.bits.send_ack = 1;
tcp_req.ordering.bits.data_recv = 1;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
/* Test case 2: send operation not completed. Expect: FAIL */
tcp_req.ordering.raw = 0;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
rc = nvme_tcp_req_complete_safe(&tcp_req);
SPDK_CU_ASSERT_FATAL(rc != true);
TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
/* Test case 3: in completion context. Expect: PASS */
tqpair.qpair.in_completion_context = 1;
tqpair.async_complete = 0;
tcp_req.ordering.bits.send_ack = 1;
tcp_req.ordering.bits.data_recv = 1;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
CU_ASSERT(tcp_req.tqpair->async_complete == 0);
/* Test case 4: not in completion context, so the async complete path is taken. Expect: PASS */
tqpair.qpair.in_completion_context = 0;
tcp_req.ordering.bits.send_ack = 1;
tcp_req.ordering.bits.data_recv = 1;
tcp_req.state = NVME_TCP_REQ_ACTIVE;
TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
rc = nvme_tcp_req_complete_safe(&tcp_req);
CU_ASSERT(rc == true);
CU_ASSERT(tcp_req.tqpair->async_complete);
}
static void
test_nvme_tcp_req_init(void)
{
struct nvme_tcp_qpair tqpair = {0};
struct nvme_request req = {0};
struct nvme_tcp_req tcp_req = {0};
struct spdk_nvme_ctrlr ctrlr = {0};
struct nvme_tcp_ut_bdev_io bio = {0};
int rc;
tqpair.qpair.ctrlr = &ctrlr;
req.qpair = &tqpair.qpair;
tcp_req.cid = 1;
req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
req.payload.contig_or_cb_arg = &bio;
req.payload_offset = 0;
req.payload_size = 4096;
ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
ctrlr.ioccsz_bytes = 1024;
bio.iovpos = 0;
bio.iovs[0].iov_len = 8192;
bio.iovs[0].iov_base = (void *)0xDEADBEEF;
/* Test case 1: payload type SGL. Expect: PASS */
req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
CU_ASSERT(rc == 0);
CU_ASSERT(tcp_req.req == &req);
CU_ASSERT(tcp_req.in_capsule_data == true);
CU_ASSERT(tcp_req.iovcnt == 1);
CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
CU_ASSERT(req.cmd.cid == tcp_req.cid);
CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
/* Test case 2: payload type CONTIG. Expect: PASS */
memset(&req.cmd, 0, sizeof(req.cmd));
memset(&tcp_req, 0, sizeof(tcp_req));
tcp_req.cid = 1;
req.payload.reset_sgl_fn = NULL;
req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
CU_ASSERT(rc == 0);
CU_ASSERT(tcp_req.req == &req);
CU_ASSERT(tcp_req.in_capsule_data == true);
CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
CU_ASSERT(tcp_req.iovcnt == 1);
CU_ASSERT(req.cmd.cid == tcp_req.cid);
CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
}
static void
test_nvme_tcp_req_get(void)
{
struct nvme_tcp_req tcp_req = {0};
struct nvme_tcp_qpair tqpair = {0};
struct nvme_tcp_pdu send_pdu = {0};
tcp_req.send_pdu = &send_pdu;
tcp_req.state = NVME_TCP_REQ_FREE;
TAILQ_INIT(&tqpair.free_reqs);
TAILQ_INIT(&tqpair.outstanding_reqs);
TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);
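/* nvme_tcp_req_get() should pop the request from free_reqs, reset its
 * per-I/O state, mark it ACTIVE, and move it to outstanding_reqs. */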
CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
CU_ASSERT(tcp_req.datao == 0);
CU_ASSERT(tcp_req.req == NULL);
CU_ASSERT(tcp_req.in_capsule_data == false);
CU_ASSERT(tcp_req.r2tl_remain == 0);
CU_ASSERT(tcp_req.iovcnt == 0);
CU_ASSERT(tcp_req.ordering.raw == 0);
CU_ASSERT(!TAILQ_EMPTY(&tqpair.outstanding_reqs));
CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));
/* No TCP request left on free_reqs; expect NULL */
SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
}
static void
test_nvme_tcp_qpair_capsule_cmd_send(void)
{
struct nvme_tcp_qpair tqpair = {};
struct nvme_tcp_req tcp_req = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_request req = {};
char iov_base0[4096];
char iov_base1[4096];
uint32_t plen;
uint8_t pdo;
memset(iov_base0, 0xFF, 4096);
memset(iov_base1, 0xFF, 4096);
tcp_req.req = &req;
tcp_req.send_pdu = &pdu;
TAILQ_INIT(&tqpair.send_queue);
tcp_req.iov[0].iov_base = (void *)iov_base0;
tcp_req.iov[0].iov_len = 4096;
tcp_req.iov[1].iov_base = (void *)iov_base1;
tcp_req.iov[1].iov_len = 4096;
tcp_req.iovcnt = 2;
tcp_req.req->payload_size = 8192;
tcp_req.in_capsule_data = true;
tqpair.cpda = NVME_TCP_HPDA_DEFAULT;
/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
tqpair.flags.host_hdgst_enable = 1;
tqpair.flags.host_ddgst_enable = 1;
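/* With header digest enabled, the data offset (pdo) falls right after
 * the header digest; plen additionally covers the payload and the
 * trailing data digest. */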
pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
SPDK_NVME_TCP_DIGEST_LEN;
plen += tcp_req.req->payload_size;
plen += SPDK_NVME_TCP_DIGEST_LEN;
nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
& SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
& SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
memset(&pdu, 0, sizeof(pdu));
tqpair.flags.host_hdgst_enable = 0;
tqpair.flags.host_ddgst_enable = 0;
pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
plen += tcp_req.req->payload_size;
nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
/* Test case 3: padding available. Expect: PASS */
memset(&pdu, 0, sizeof(pdu));
tqpair.flags.host_hdgst_enable = 1;
tqpair.flags.host_ddgst_enable = 1;
tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;
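/* cpda is a 0's based value in 4-byte units: the data must start at
 * (cpda + 1) * 4 bytes, and the gap between the header (plus header
 * digest) and that offset is filled with padding. */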
pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
plen += tcp_req.req->payload_size;
plen += SPDK_NVME_TCP_DIGEST_LEN;
nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
& SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
& SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
}
/* Transfer-complete callback stub; the test only needs a valid function pointer. */
static void
ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
{
}
static void
test_nvme_tcp_qpair_write_pdu(void)
{
struct nvme_tcp_qpair tqpair = {};
struct nvme_tcp_pdu pdu = {};
void *cb_arg = (void *)0xDEADBEEF;
char iov_base0[4096];
char iov_base1[4096];
memset(iov_base0, 0xFF, 4096);
memset(iov_base1, 0xFF, 4096);
pdu.data_len = 4096 * 2;
pdu.padding_len = 0;
pdu.data_iov[0].iov_base = (void *)iov_base0;
pdu.data_iov[0].iov_len = 4096;
pdu.data_iov[1].iov_base = (void *)iov_base1;
pdu.data_iov[1].iov_len = 4096;
pdu.data_iovcnt = 2;
TAILQ_INIT(&tqpair.send_queue);
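/* nvme_tcp_qpair_write_pdu() computes any enabled digests, flattens the
 * header, data, and data digest into pdu->iov, and queues pdu->sock_req
 * on the qpair's send_queue. */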
/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen +
SPDK_NVME_TCP_DIGEST_LEN * 2 + pdu.data_len;
tqpair.flags.host_hdgst_enable = 1;
tqpair.flags.host_ddgst_enable = 1;
nvme_tcp_qpair_write_pdu(&tqpair,
&pdu,
ut_nvme_tcp_qpair_xfer_complete_cb,
cb_arg);
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
/* Check that the header digest CRC was written into the raw header and that the data digest was computed */
CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
CU_ASSERT(pdu.data_digest[0]);
CU_ASSERT(pdu.sock_req.iovcnt == 4);
CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
SPDK_NVME_TCP_DIGEST_LEN));
CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
CU_ASSERT(pdu.cb_arg == cb_arg);
CU_ASSERT(pdu.qpair == &tqpair);
CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
tqpair.flags.host_hdgst_enable = 0;
tqpair.flags.host_ddgst_enable = 0;
nvme_tcp_qpair_write_pdu(&tqpair,
&pdu,
ut_nvme_tcp_qpair_xfer_complete_cb,
cb_arg);
TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
CU_ASSERT(pdu.data_digest[0] == 0);
CU_ASSERT(pdu.sock_req.iovcnt == 3);
CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
CU_ASSERT(pdu.cb_arg == cb_arg);
CU_ASSERT(pdu.qpair == &tqpair);
CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
CU_set_error_action(CUEA_ABORT);
CU_initialize_registry();
suite = CU_add_suite("nvme_tcp", NULL, NULL);
CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
CU_ADD_TEST(suite, test_nvme_tcp_req_get);
CU_ADD_TEST(suite, test_nvme_tcp_req_init);
CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}