/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvme/nvme_rdma.c"
SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
uint64_t size), 0);
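
/*
 * Only the mem-map stubs actually reached by the code under test need
 * definitions here; other unreferenced externs are discarded at link time
 * (the test executables are built with -ffunction-sections/--gc-sections).
 */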

/*
 * Used to mock out having to split an SGL over a memory region boundary:
 * when g_mr_size is non-zero, the translate mock below reports it as the
 * size of the contiguous registered region, forcing requests whose SGL
 * entries are larger than that to fail.
 */
uint64_t g_mr_size;
struct ibv_mr g_nvme_rdma_mr;

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
	}

	return (uint64_t)&g_nvme_rdma_mr;
}
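
/*
 * Sketch of how the tested code is assumed to consume this mock (not
 * compiled; caller-side names are illustrative).  spdk_mem_map_translate()
 * treats size as in/out: the caller passes the length it needs and checks
 * whether the returned region covers it:
 *
 *	mr_length = sge_length;
 *	mr = (struct ibv_mr *)spdk_mem_map_translate(map, vaddr, &mr_length);
 *	if (mr == NULL || mr_length < sge_length) {
 *		return -1;	// SGL entry spans a region boundary
 *	}
 */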

struct nvme_rdma_ut_bdev_io {
	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
static void
nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Only provide offsets at the beginning of an iov */
		if (offset == 0) {
			break;
		}
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
}

static int
nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}
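
/*
 * Illustrative walk-through (hypothetical values): with two iovs of 0x1000
 * bytes each, nvme_rdma_ut_reset_sgl(bio, 0x1000) leaves iovpos == 1, and
 * the next nvme_rdma_ut_next_sge() call then yields iovs[1] and advances
 * iovpos to 2.  An offset that does not land exactly on an iov boundary
 * never reaches zero, so the loop runs off the end and trips the fatal
 * assert above.
 */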

static void
test_nvme_rdma_build_sgl_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio;
	struct spdk_nvme_rdma_mr_map rmap = {0};
	struct spdk_mem_map *map = NULL;
	uint64_t i;
	int rc;

	rmap.map = map;
	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = &rmap;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.qpair = &rqpair.qpair;

	g_nvme_rdma_mr.rkey = 1;

	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_base = (void *)i;
		bio.iovs[i].iov_len = 0;
	}

	/* Test case 1: single SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: multiple SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 4);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
	CU_ASSERT(rdma_req.send_sgl[0].length ==
		  4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(struct spdk_nvme_cmd));
	for (i = 0; i < 4; i++) {
		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
		CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
	}

	/*
	 * Test case 3: multiple SGL, each entry twice the reported MR size
	 * (reuses the 4 x 0x1000 iov layout from case 2). Expected: FAIL
	 */
	bio.iovpos = 0;
	req.payload_offset = 0;
	g_mr_size = 0x800;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == 1);

	/* Test case 4: multiple SGL, but the iovs cover less than the I/O size. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x6000;
	g_mr_size = 0x0;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
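
	/*
	 * Test case 5 (hypothetical addition, not in the original suite):
	 * single SGL at a non-zero payload offset. Expected: PASS.
	 * This assumes nvme_rdma_build_sgl_request() forwards payload_offset
	 * to reset_sgl_fn, so iteration starts at the second iov (set up in
	 * case 2 with a length of 0x1000).
	 */
	bio.iovpos = 0;
	req.payload_offset = 0x1000;
	req.payload_size = 0x1000;
	g_mr_size = 0x0;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 2);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[1].iov_base);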
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_rdma", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "build_sgl_request", test_nvme_rdma_build_sgl_request) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}