/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "nvme/nvme_rdma.c"
#include "common/lib/nvme/common_stubs.h"
#include "common/lib/test_rdma.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

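/*
 * Note: nvme_rdma.c (the implementation, not a header) is #included above,
 * so its file-static functions such as nvme_rdma_build_sgl_request() are
 * directly callable from the test cases below. External dependencies are
 * replaced by the DEFINE_STUB fixtures that follow.
 */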
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);

DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));

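/*
 * Per-I/O context handed to the SGL callbacks: an iovec table plus the
 * current position within it, standing in for the state a bdev-style
 * caller would track.
 */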
struct nvme_rdma_ut_bdev_io {
	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
struct ibv_context **
rdma_get_devices(int *num_devices)
{
	static struct ibv_context *_contexts[] = {
		(struct ibv_context *)0xDEADBEEF,
		(struct ibv_context *)0xFEEDBEEF,
		NULL
	};

	HANDLE_RETURN_MOCK(rdma_get_devices);
	return _contexts;
}

DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
struct rdma_event_channel *
rdma_create_event_channel(void)
{
	HANDLE_RETURN_MOCK(rdma_create_event_channel);
	return NULL;
}

DEFINE_RETURN_MOCK(ibv_query_device, int);
int
ibv_query_device(struct ibv_context *context,
		 struct ibv_device_attr *device_attr)
{
	if (device_attr) {
		device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	}
	HANDLE_RETURN_MOCK(ibv_query_device);

	return 0;
}

/* Essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Only provide offsets at the beginning of an iov */
		if (offset == 0) {
			break;
		}

		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
}

static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

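/*
 * Expected behavior exercised below: nvme_rdma_build_sgl_request() emits a
 * single keyed SGL descriptor when one iovec covers the payload, and falls
 * back to an in-capsule list of keyed descriptors (with sgl1 acting as the
 * unkeyed last-segment descriptor) when several iovecs are needed.
 */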
static void
test_nvme_rdma_build_sgl_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio;
	uint64_t i;
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;
	ctrlr.ioccsz_bytes = 4096;

	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.qpair = &rqpair.qpair;

	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_base = (void *)i;
		bio.iovs[i].iov_len = 0;
	}

	/* Test case 1: single SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: multiple SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 4);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) +
		  sizeof(struct spdk_nvme_cmd));
	for (i = 0; i < 4; i++) {
		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
		CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
	}

	/* Test case 3: multiple SGL, each iov is twice the MR size. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	g_mr_size = 0x800;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == 1);

	/* Test case 4: multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x6000;
	g_mr_size = 0x0;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);

	/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
	req.payload_size = 0x1000 + (1 << 24);
	bio.iovs[0].iov_len = 0x1000;
	bio.iovs[1].iov_len = 1 << 24;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);

	/* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
	ctrlr.ioccsz_bytes = 60;
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == -1);
}

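/*
 * Inline (in-capsule) requests are expected to place the payload in
 * send_sgl[1] with the local key, so no remote-keyed descriptor is
 * generated and the 24-bit keyed-length limit does not apply (test case 2
 * below passes).
 */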
static void
test_nvme_rdma_build_sgl_inline_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio;
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	bio.iovs[0].iov_len = 1 << 24;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
}

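/*
 * A contiguous request maps the flat buffer in contig_or_cb_arg to one
 * keyed SGL descriptor; payloads longer than the 24-bit keyed length field
 * should be rejected (test case 2 below).
 */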
static void
test_nvme_rdma_build_contig_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.qpair = &rqpair.qpair;

	/* Test case 1: contig request. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
}

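/*
 * The contiguous inline variant, like the iovec-based inline path above,
 * carries the payload in the capsule and therefore accepts sizes beyond
 * the 24-bit keyed limit.
 */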
static void
test_nvme_rdma_build_contig_inline_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
}

static void
test_nvme_rdma_alloc_reqs(void)
{
	struct nvme_rdma_qpair rqpair = {};
	int rc;

	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));

	/* Test case 1: zero entries. Expected: FAIL */
	rqpair.num_entries = 0;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	CU_ASSERT(rqpair.rdma_reqs == NULL);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

	/* Test case 2: single entry. Expected: PASS */
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 1;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr == (uint64_t)&rqpair.cmds[0]);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list == rqpair.rdma_reqs[0].send_sgl);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
	spdk_free(rqpair.rdma_reqs);
	spdk_free(rqpair.cmds);

	/* Test case 3: multiple entries. Expected: PASS */
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 5;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	for (int i = 0; i < 5; i++) {
		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr == (uint64_t)&rqpair.cmds[i]);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags == IBV_SEND_SIGNALED);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list == rqpair.rdma_reqs[i].send_sgl);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
	}
	spdk_free(rqpair.rdma_reqs);
	spdk_free(rqpair.cmds);
}

static void
test_nvme_rdma_alloc_rsps(void)
{
	struct nvme_rdma_qpair rqpair = {};
	int rc;

	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));

	/* Test case 1: zero entries, so the allocation fails. Expected: FAIL */
	rqpair.num_entries = 0;
	rc = nvme_rdma_alloc_rsps(&rqpair);
	CU_ASSERT(rqpair.rsp_sgls == NULL);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

	/* Test case 2: single entry, allocation succeeds. Expected: PASS */
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 1;

	rc = nvme_rdma_alloc_rsps(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rsp_sgls != NULL);
	CU_ASSERT(rqpair.rsp_recv_wrs != NULL);
	CU_ASSERT(rqpair.rsps != NULL);
	nvme_rdma_free_rsps(&rqpair);
}

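/*
 * Queue creation should propagate qsize into num_entries and fail cleanly
 * for a zero-sized queue.
 */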
static void
test_nvme_rdma_ctrlr_create_qpair(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	uint16_t qid, qsize;
	struct spdk_nvme_qpair *qpair;
	struct nvme_rdma_qpair *rqpair;

	/* Test case 1: max qsize. Expected: PASS */
	qsize = 0xffff;
	qid = 1;

	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false);
	CU_ASSERT(qpair != NULL);
	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
	CU_ASSERT(qpair == &rqpair->qpair);
	CU_ASSERT(rqpair->num_entries == qsize);
	CU_ASSERT(rqpair->delay_cmd_submit == false);
	CU_ASSERT(rqpair->rsp_sgls != NULL);
	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
	CU_ASSERT(rqpair->rsps != NULL);

	nvme_rdma_free_reqs(rqpair);
	nvme_rdma_free_rsps(rqpair);
	nvme_rdma_free(rqpair);
	rqpair = NULL;

	/* Test case 2: zero qsize. Expected: FAIL */
	qsize = 0;

	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false);
	SPDK_CU_ASSERT_FATAL(qpair == NULL);
}

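/*
 * CQ creation is stubbed to return the sentinel 0xFEEDBEEF so that
 * nvme_rdma_poller_create() can be exercised without a real RDMA device.
 */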
DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
	    struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);

static void
test_nvme_rdma_poller_create(void)
{
	struct nvme_rdma_poll_group group = {};
	struct ibv_context *contexts = (struct ibv_context *)0xDEADBEEF;
	int rc;

	/* Case: both the calloc and ibv_create_cq succeed, so poller creation passes */
	STAILQ_INIT(&group.pollers);
	group.num_pollers = 1;
	rc = nvme_rdma_poller_create(&group, contexts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(group.num_pollers == 2);
	CU_ASSERT(!STAILQ_EMPTY(&group.pollers));
	CU_ASSERT(group.pollers.stqh_first->device == contexts);
	CU_ASSERT(group.pollers.stqh_first->cq == (struct ibv_cq *)0xFEEDBEEF);
	CU_ASSERT(group.pollers.stqh_first->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(group.pollers.stqh_first->required_num_wc == 0);

	nvme_rdma_poll_group_free_pollers(&group);
}

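/*
 * Expected behavior modeled below: most CM events are handled with rc == 0;
 * CONNECT_RESPONSE trims num_entries to the target's advertised crqsize
 * (and fails on missing private data), while DISCONNECTED, DEVICE_REMOVAL
 * and ADDR_CHANGE additionally record a transport failure reason.
 */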
static void
test_nvme_rdma_qpair_process_cm_event(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct rdma_cm_event event = {};
	struct spdk_nvmf_rdma_accept_private_data accept_data = {};
	int rc = 0;

	/* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_ERROR;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case4: event == RDMA_CM_EVENT_UNREACHABLE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_UNREACHABLE;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
	event.param.conn.private_data = NULL;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == -1);

	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
	event.param.conn.private_data = &accept_data;
	accept_data.crqsize = 512;
	rqpair.num_entries = 1024;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.num_entries == 512);

	/* case6: event == RDMA_CM_EVENT_DISCONNECTED */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_DISCONNECTED;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);

	/* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);

	/* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_ADDR_CHANGE;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);

	/* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case11: default event == 0xFF */
	rqpair.evt = &event;
	event.event = 0xFF;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
}

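/*
 * With g_nvme_hooks.get_rkey installed, the union's key field is treated
 * as a pointer to the caller-provided key; otherwise the lkey comes from
 * the registered ibv_mr.
 */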
static void
test_nvme_rdma_mr_get_lkey(void)
{
	union nvme_rdma_mr mr = {};
	struct ibv_mr ibv_mr = {};
	uint64_t mr_key;
	uint32_t lkey;

	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
	ibv_mr.lkey = 1;
	mr_key = 2;

	/* Case 1: get the key from the key address */
	mr.key = (uint64_t)&mr_key;
	g_nvme_hooks.get_rkey = (void *)0xAEADBEEF;

	lkey = nvme_rdma_mr_get_lkey(&mr);
	CU_ASSERT(lkey == mr_key);

	/* Case 2: get the key from ibv_mr */
	g_nvme_hooks.get_rkey = NULL;
	mr.mr = &ibv_mr;

	lkey = nvme_rdma_mr_get_lkey(&mr);
	CU_ASSERT(lkey == ibv_mr.lkey);
}

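/*
 * Controller construction should clamp out-of-range transport_retry_count
 * and transport_ack_timeout options to the transport maxima and stand up
 * an admin queue sized by admin_queue_size.
 */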
static void
test_nvme_rdma_ctrlr_construct(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr_opts opts = {};
	struct nvme_rdma_qpair *rqpair = NULL;
	struct nvme_rdma_ctrlr *rctrlr = NULL;
	struct rdma_event_channel cm_channel = {};
	void *devhandle = NULL;
	int rc;

	opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
	opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
	opts.admin_queue_size = 0xFFFF;
	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	MOCK_SET(rdma_create_event_channel, &cm_channel);

	ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	CU_ASSERT(ctrlr->opts.transport_retry_count ==
		  NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
	CU_ASSERT(ctrlr->opts.transport_ack_timeout ==
		  NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
	CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
	rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
	CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
	CU_ASSERT(rctrlr->cm_channel == &cm_channel);
	CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid,
			   (char *)&trid, sizeof(trid)));

	SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
	rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
	CU_ASSERT(rqpair->num_entries == opts.admin_queue_size);
	CU_ASSERT(rqpair->delay_cmd_submit == false);
	CU_ASSERT(rqpair->rsp_sgls != NULL);
	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
	CU_ASSERT(rqpair->rsps != NULL);
	MOCK_CLEAR(rdma_create_event_channel);

	/* Hardcode the trtype, because nvme_qpair_init() is a stub function. */
	rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rc = nvme_rdma_ctrlr_destruct(ctrlr);
	CU_ASSERT(rc == 0);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_rdma", NULL, NULL);
	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_alloc_reqs);
	CU_ADD_TEST(suite, test_nvme_rdma_alloc_rsps);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
	CU_ADD_TEST(suite, test_nvme_rdma_mr_get_lkey);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}