spdk/test/unit/lib/nvmf/vfio_user.c/vfio_user_ut.c

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation. All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/vfio_user.c"
#include "nvmf/transport.c"
DEFINE_STUB(spdk_nvmf_ctrlr_get_regs, const struct spdk_nvmf_registers *,
	    (struct spdk_nvmf_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_exec_fabrics, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
	    nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsystem,
	    uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
	    spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
	    union spdk_nvme_async_event_completion event), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
	    struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
	    const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), NULL);
DEFINE_STUB(nvmf_ctrlr_save_aers, int, (struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
	    uint16_t max_aers), 0);
DEFINE_STUB(nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
	    struct nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB(nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
	    struct nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB_V(nvmf_ctrlr_set_fatal_status, (struct spdk_nvmf_ctrlr *ctrlr));
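
/* Address-translation callback handed to the mapping helpers. The buffers
 * below live in the test process itself, so a "guest physical" address is
 * already a valid virtual address and the translation is the identity;
 * prv, len and prot are unused.
 */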
static void *
gpa_to_vva(void *prv, uint64_t addr, uint64_t len, int prot)
{
	return (void *)(uintptr_t)addr;
}
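
/* PRP rules exercised below: PRP1 may point anywhere within a page, so the
 * first iov covers only the remainder of that page; a transfer that fits
 * in two pages puts the second page in PRP2, while a larger transfer makes
 * PRP2 point at a list of page-aligned PRP entries. The helper returns the
 * number of iovs consumed, or -ERANGE if the caller's iov array is too
 * small.
 */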
static void
test_nvme_cmd_map_prps(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr, *prp;
	uint32_t len;
	void *buf, *prps;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	prps = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(prps != NULL);

	/* test case 1: 4KiB with PRP1 only */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == len);

	/* test case 2: 4KiB with PRP1 and PRP2, 1KiB in first iov, and 3KiB in second iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)buf + 4096;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 1, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	CU_ASSERT(iovs[1].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp2);
	CU_ASSERT(iovs[1].iov_len == 1024 * 3);

	/* test case 3: 128KiB with PRP list, 1KiB in first iov, 3KiB in last iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 128 * 1024;
	prp = prps;
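	/* Entries 1..32 of the PRP list point at consecutive 4KiB pages of
	 * buf, covering the 127KiB that remain after the 1KiB mapped through
	 * PRP1.
	 */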
	for (i = 1; i < 33; i++) {
		*prp = (uint64_t)(uintptr_t)buf + i * 4096;
		prp++;
	}
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 33);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	for (i = 1; i < 32; i++) {
		CU_ASSERT(iovs[i].iov_base == (void *)((uintptr_t)buf + i * 4096));
		CU_ASSERT(iovs[i].iov_len == 4096);
	}
	CU_ASSERT(iovs[32].iov_base == (void *)((uintptr_t)buf + 32 * 4096));
	CU_ASSERT(iovs[32].iov_len == 1024 * 3);

	/* test case 4: 256KiB with PRP list, not enough iovs */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 256 * 1024;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(prps);
}
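
/* SGL mapping uses three descriptor types: a data block maps a contiguous
 * buffer directly, while segment and last-segment descriptors point at a
 * further array of descriptors (a last segment must be the final such
 * array). As with PRPs, -ERANGE is expected when the iov array is too
 * small.
 */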
static void
test_nvme_cmd_map_sgls(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr;
	uint32_t len;
	void *buf, *sgls;
	struct spdk_nvme_sgl_descriptor *sgl;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	sgls = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(sgls != NULL);

	/* test case 1: 8KiB with 1 data block */
	len = 8192;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.unkeyed.length = len;
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)buf;
	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == buf);
	CU_ASSERT(iovs[0].iov_len == 8192);

	/* test case 2: 8KiB with 2 data blocks and 1 last segment */
	sgl = (struct spdk_nvme_sgl_descriptor *)sgls;
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[1].unkeyed.length = len - 2048;
	sgl[1].address = (uint64_t)(uintptr_t)buf + 16 * 1024;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;
	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[1].iov_len == len - 2048);

	/* test case 3: 8KiB with 1 segment, 1 last segment and 3 data blocks */
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl[1].unkeyed.length = 2 * sizeof(*sgl);
	sgl[1].address = (uint64_t)(uintptr_t)&sgl[9];
	sgl[9].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[9].unkeyed.length = 4096;
	sgl[9].address = (uint64_t)(uintptr_t)buf + 4 * 1024;
	sgl[10].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[10].unkeyed.length = 2048;
	sgl[10].address = (uint64_t)(uintptr_t)buf + 16 * 1024;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)&sgl[0];
	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 3);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 4 * 1024));
	CU_ASSERT(iovs[1].iov_len == 4096);
	CU_ASSERT(iovs[2].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[2].iov_len == 2048);

	/* test case 4: 12KiB described by 6 data blocks, not enough iovs */
	len = 12 * 1024;
	for (i = 0; i < 6; i++) {
		sgl[i].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		sgl[i].unkeyed.length = 2048;
		sgl[i].address = (uint64_t)(uintptr_t)buf + i * 4096;
	}
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 6 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;
	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 4, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(sgls);
}
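
/* Exercise transport creation and teardown. nvmf_vfio_user_destroy() is
 * expected to release any endpoints still on the transport's list and to
 * invoke the done callback before returning, which the assertions below
 * rely on.
 */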
static void
ut_transport_destroy_done_cb(void *cb_arg)
{
	int *done = cb_arg;

	*done = 1;
}
static void
test_nvmf_vfio_user_create_destroy(void)
{
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_vfio_user_transport *vu_transport = NULL;
	struct nvmf_vfio_user_endpoint *endpoint = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	int rc;
	int done;

	/* Leave transport_specific NULL to avoid decoding JSON */
	opts.transport_specific = NULL;

	transport = nvmf_vfio_user_create(&opts);
	CU_ASSERT(transport != NULL);

	vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
					transport);

	/* Allocate an endpoint for destroy to clean up */
	endpoint = calloc(1, sizeof(*endpoint));
	SPDK_CU_ASSERT_FATAL(endpoint != NULL);
	pthread_mutex_init(&endpoint->lock, NULL);
	TAILQ_INSERT_TAIL(&vu_transport->endpoints, endpoint, link);
	done = 0;

	rc = nvmf_vfio_user_destroy(transport, ut_transport_destroy_done_cb, &done);
	CU_ASSERT(rc == 0);
	CU_ASSERT(done == 1);
}
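
/* Standard CUnit driver: register the suite, run it verbosely and return
 * the number of failed assertions as the exit status.
 */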
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vfio_user", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_cmd_map_prps);
	CU_ADD_TEST(suite, test_nvme_cmd_map_sgls);
	CU_ADD_TEST(suite, test_nvmf_vfio_user_create_destroy);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}