nvme: split out transport-specific ctrlr structure

Change-Id: Icba2a44ff8ff35df09b3f1d8e3282a784e397a06
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-10-14 14:26:03 -07:00
parent c655efd6a9
commit 62d7cded7a
5 changed files with 109 additions and 90 deletions

View File

@@ -47,18 +47,20 @@ int32_t spdk_nvme_retry_count;
static struct spdk_nvme_ctrlr * static struct spdk_nvme_ctrlr *
nvme_attach(void *devhandle) nvme_attach(void *devhandle)
{ {
const struct spdk_nvme_transport *transport;
struct spdk_nvme_ctrlr *ctrlr; struct spdk_nvme_ctrlr *ctrlr;
int status; int status;
uint64_t phys_addr = 0; uint64_t phys_addr = 0;
ctrlr = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr), transport = &spdk_nvme_transport_pcie;
64, &phys_addr);
ctrlr = spdk_zmalloc(transport->ctrlr_size, 64, &phys_addr);
if (ctrlr == NULL) { if (ctrlr == NULL) {
SPDK_ERRLOG("could not allocate ctrlr\n"); SPDK_ERRLOG("could not allocate ctrlr\n");
return NULL; return NULL;
} }
ctrlr->transport = &spdk_nvme_transport_pcie; ctrlr->transport = transport;
status = nvme_ctrlr_construct(ctrlr, devhandle); status = nvme_ctrlr_construct(ctrlr, devhandle);
if (status != 0) { if (status != 0) {

View File

@@ -249,6 +249,12 @@ struct pci_id {
}; };
struct spdk_nvme_transport { struct spdk_nvme_transport {
/*
* Size of the transport-specific extended spdk_nvme_ctrlr structure,
* which must contain spdk_nvme_ctrlr as the first element.
*/
size_t ctrlr_size;
int (*ctrlr_construct)(struct spdk_nvme_ctrlr *ctrlr, void *devhandle); int (*ctrlr_construct)(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr); void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr);
@@ -417,9 +423,6 @@ enum nvme_ctrlr_state {
struct spdk_nvme_ctrlr { struct spdk_nvme_ctrlr {
/* Hot data (accessed in I/O path) starts here. */ /* Hot data (accessed in I/O path) starts here. */
/** NVMe MMIO register space */
volatile struct spdk_nvme_registers *regs;
const struct spdk_nvme_transport *transport; const struct spdk_nvme_transport *transport;
/** I/O queue pairs */ /** I/O queue pairs */
@@ -459,9 +462,6 @@ struct spdk_nvme_ctrlr {
/** minimum page size supported by this controller in bytes */ /** minimum page size supported by this controller in bytes */
uint32_t min_page_size; uint32_t min_page_size;
/** stride in uint32_t units between doorbell registers (1 = 4 bytes, 2 = 8 bytes, ...) */
uint32_t doorbell_stride_u32;
uint32_t num_aers; uint32_t num_aers;
struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS]; struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
spdk_nvme_aer_cb aer_cb_fn; spdk_nvme_aer_cb aer_cb_fn;
@@ -490,15 +490,6 @@ struct spdk_nvme_ctrlr {
struct spdk_nvme_ctrlr_opts opts; struct spdk_nvme_ctrlr_opts opts;
/** BAR mapping address which contains controller memory buffer */
void *cmb_bar_virt_addr;
/** BAR physical address which contains controller memory buffer */
uint64_t cmb_bar_phys_addr;
/** Controller memory buffer size in Bytes */
uint64_t cmb_size;
/** Current offset of controller memory buffer */
uint64_t cmb_current_offset;
/** PCI address including domain, bus, device and function */ /** PCI address including domain, bus, device and function */
struct spdk_pci_addr pci_addr; struct spdk_pci_addr pci_addr;
}; };

View File

@@ -37,6 +37,37 @@
#include "nvme_internal.h" #include "nvme_internal.h"
/* PCIe transport extensions for spdk_nvme_ctrlr */
struct nvme_pcie_ctrlr {
struct spdk_nvme_ctrlr ctrlr;
/** NVMe MMIO register space */
volatile struct spdk_nvme_registers *regs;
/* BAR mapping address which contains controller memory buffer */
void *cmb_bar_virt_addr;
/* BAR physical address which contains controller memory buffer */
uint64_t cmb_bar_phys_addr;
/* Controller memory buffer size in Bytes */
uint64_t cmb_size;
/* Current offset of controller memory buffer */
uint64_t cmb_current_offset;
/** stride in uint32_t units between doorbell registers (1 = 4 bytes, 2 = 8 bytes, ...) */
uint32_t doorbell_stride_u32;
};
SPDK_STATIC_ASSERT(offsetof(struct nvme_pcie_ctrlr, ctrlr) == 0, "ctrlr must be first field");
static inline struct nvme_pcie_ctrlr *
nvme_pcie_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
assert(ctrlr->transport == &spdk_nvme_transport_pcie);
return (struct nvme_pcie_ctrlr *)ctrlr;
}
static int static int
nvme_pcie_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id) nvme_pcie_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id)
{ {
@@ -59,7 +90,9 @@ nvme_pcie_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id)
static volatile void * static volatile void *
nvme_pcie_reg_addr(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset) nvme_pcie_reg_addr(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset)
{ {
return (volatile void *)((uintptr_t)ctrlr->regs + offset); struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
return (volatile void *)((uintptr_t)pctrlr->regs + offset);
} }
static int static int
@@ -97,21 +130,21 @@ nvme_pcie_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64
} }
static int static int
nvme_pcie_ctrlr_get_cmbloc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbloc_register *cmbloc) nvme_pcie_ctrlr_get_cmbloc(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbloc_register *cmbloc)
{ {
return nvme_pcie_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
&cmbloc->raw); &cmbloc->raw);
} }
static int static int
nvme_pcie_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz) nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{ {
return nvme_pcie_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
&cmbsz->raw); &cmbsz->raw);
} }
static void static void
nvme_pcie_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr) nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
{ {
int rc; int rc;
void *addr; void *addr;
@@ -120,8 +153,8 @@ nvme_pcie_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr)
union spdk_nvme_cmbloc_register cmbloc; union spdk_nvme_cmbloc_register cmbloc;
uint64_t size, unit_size, offset, bar_size, bar_phys_addr; uint64_t size, unit_size, offset, bar_size, bar_phys_addr;
if (nvme_pcie_ctrlr_get_cmbsz(ctrlr, &cmbsz) || if (nvme_pcie_ctrlr_get_cmbsz(pctrlr, &cmbsz) ||
nvme_pcie_ctrlr_get_cmbloc(ctrlr, &cmbloc)) { nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get registers failed\n"); SPDK_TRACELOG(SPDK_TRACE_NVME, "get registers failed\n");
goto exit; goto exit;
} }
@@ -141,7 +174,7 @@ nvme_pcie_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr)
/* controller memory buffer offset from BAR in Bytes */ /* controller memory buffer offset from BAR in Bytes */
offset = unit_size * cmbloc.bits.ofst; offset = unit_size * cmbloc.bits.ofst;
rc = spdk_pci_device_map_bar(ctrlr->devhandle, bir, &addr, rc = spdk_pci_device_map_bar(pctrlr->ctrlr.devhandle, bir, &addr,
&bar_phys_addr, &bar_size); &bar_phys_addr, &bar_size);
if ((rc != 0) || addr == NULL) { if ((rc != 0) || addr == NULL) {
goto exit; goto exit;
@@ -155,35 +188,35 @@ nvme_pcie_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr)
goto exit; goto exit;
} }
ctrlr->cmb_bar_virt_addr = addr; pctrlr->cmb_bar_virt_addr = addr;
ctrlr->cmb_bar_phys_addr = bar_phys_addr; pctrlr->cmb_bar_phys_addr = bar_phys_addr;
ctrlr->cmb_size = size; pctrlr->cmb_size = size;
ctrlr->cmb_current_offset = offset; pctrlr->cmb_current_offset = offset;
if (!cmbsz.bits.sqs) { if (!cmbsz.bits.sqs) {
ctrlr->opts.use_cmb_sqs = false; pctrlr->ctrlr.opts.use_cmb_sqs = false;
} }
return; return;
exit: exit:
ctrlr->cmb_bar_virt_addr = NULL; pctrlr->cmb_bar_virt_addr = NULL;
ctrlr->opts.use_cmb_sqs = false; pctrlr->ctrlr.opts.use_cmb_sqs = false;
return; return;
} }
static int static int
nvme_pcie_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr) nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
{ {
int rc = 0; int rc = 0;
union spdk_nvme_cmbloc_register cmbloc; union spdk_nvme_cmbloc_register cmbloc;
void *addr = ctrlr->cmb_bar_virt_addr; void *addr = pctrlr->cmb_bar_virt_addr;
if (addr) { if (addr) {
if (nvme_pcie_ctrlr_get_cmbloc(ctrlr, &cmbloc)) { if (nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cmbloc() failed\n"); SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cmbloc() failed\n");
return -EIO; return -EIO;
} }
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, cmbloc.bits.bir, addr); rc = spdk_pci_device_unmap_bar(pctrlr->ctrlr.devhandle, cmbloc.bits.bir, addr);
} }
return rc; return rc;
} }
@@ -192,55 +225,56 @@ static int
nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned, nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
uint64_t *offset) uint64_t *offset)
{ {
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
uint64_t round_offset; uint64_t round_offset;
round_offset = ctrlr->cmb_current_offset; round_offset = pctrlr->cmb_current_offset;
round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1); round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1);
if (round_offset + length > ctrlr->cmb_size) if (round_offset + length > pctrlr->cmb_size)
return -1; return -1;
*offset = round_offset; *offset = round_offset;
ctrlr->cmb_current_offset = round_offset + length; pctrlr->cmb_current_offset = round_offset + length;
return 0; return 0;
} }
static int static int
nvme_pcie_ctrlr_allocate_bars(struct spdk_nvme_ctrlr *ctrlr) nvme_pcie_ctrlr_allocate_bars(struct nvme_pcie_ctrlr *pctrlr)
{ {
int rc; int rc;
void *addr; void *addr;
uint64_t phys_addr, size; uint64_t phys_addr, size;
rc = spdk_pci_device_map_bar(ctrlr->devhandle, 0, &addr, rc = spdk_pci_device_map_bar(pctrlr->ctrlr.devhandle, 0, &addr,
&phys_addr, &size); &phys_addr, &size);
ctrlr->regs = (volatile struct spdk_nvme_registers *)addr; pctrlr->regs = (volatile struct spdk_nvme_registers *)addr;
if ((ctrlr->regs == NULL) || (rc != 0)) { if ((pctrlr->regs == NULL) || (rc != 0)) {
SPDK_ERRLOG("nvme_pcicfg_map_bar failed with rc %d or bar %p\n", SPDK_ERRLOG("nvme_pcicfg_map_bar failed with rc %d or bar %p\n",
rc, ctrlr->regs); rc, pctrlr->regs);
return -1; return -1;
} }
nvme_pcie_ctrlr_map_cmb(ctrlr); nvme_pcie_ctrlr_map_cmb(pctrlr);
return 0; return 0;
} }
static int static int
nvme_pcie_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr) nvme_pcie_ctrlr_free_bars(struct nvme_pcie_ctrlr *pctrlr)
{ {
int rc = 0; int rc = 0;
void *addr = (void *)ctrlr->regs; void *addr = (void *)pctrlr->regs;
rc = nvme_pcie_ctrlr_unmap_cmb(ctrlr); rc = nvme_pcie_ctrlr_unmap_cmb(pctrlr);
if (rc != 0) { if (rc != 0) {
SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc); SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc);
return -1; return -1;
} }
if (addr) { if (addr) {
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, 0, addr); rc = spdk_pci_device_unmap_bar(pctrlr->ctrlr.devhandle, 0, addr);
} }
return rc; return rc;
} }
@@ -248,11 +282,12 @@ nvme_pcie_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr)
static int static int
nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle) nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{ {
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
union spdk_nvme_cap_register cap; union spdk_nvme_cap_register cap;
uint32_t cmd_reg; uint32_t cmd_reg;
int rc; int rc;
rc = nvme_pcie_ctrlr_allocate_bars(ctrlr); rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
if (rc != 0) { if (rc != 0) {
return rc; return rc;
} }
@@ -269,7 +304,7 @@ nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
/* Doorbell stride is 2 ^ (dstrd + 2), /* Doorbell stride is 2 ^ (dstrd + 2),
* but we want multiples of 4, so drop the + 2 */ * but we want multiples of 4, so drop the + 2 */
ctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd; pctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;
/* Save the PCI address */ /* Save the PCI address */
ctrlr->pci_addr.domain = spdk_pci_device_get_domain(devhandle); ctrlr->pci_addr.domain = spdk_pci_device_get_domain(devhandle);
@@ -283,7 +318,9 @@ nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
static void static void
nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr) nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{ {
nvme_pcie_ctrlr_free_bars(ctrlr); struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
nvme_pcie_ctrlr_free_bars(pctrlr);
} }
static void static void
@@ -318,6 +355,7 @@ static int
nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair) nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
{ {
struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr; struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
struct nvme_tracker *tr; struct nvme_tracker *tr;
uint16_t i; uint16_t i;
volatile uint32_t *doorbell_base; volatile uint32_t *doorbell_base;
@@ -344,8 +382,8 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
if (ctrlr->opts.use_cmb_sqs) { if (ctrlr->opts.use_cmb_sqs) {
if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, qpair->num_entries * sizeof(struct spdk_nvme_cmd), if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, qpair->num_entries * sizeof(struct spdk_nvme_cmd),
0x1000, &offset) == 0) { 0x1000, &offset) == 0) {
qpair->cmd = ctrlr->cmb_bar_virt_addr + offset; qpair->cmd = pctrlr->cmb_bar_virt_addr + offset;
qpair->cmd_bus_addr = ctrlr->cmb_bar_phys_addr + offset; qpair->cmd_bus_addr = pctrlr->cmb_bar_phys_addr + offset;
qpair->sq_in_cmb = true; qpair->sq_in_cmb = true;
} }
} }
@@ -367,9 +405,9 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
return -ENOMEM; return -ENOMEM;
} }
doorbell_base = &ctrlr->regs->doorbell[0].sq_tdbl; doorbell_base = &pctrlr->regs->doorbell[0].sq_tdbl;
qpair->sq_tdbl = doorbell_base + (2 * qpair->id + 0) * ctrlr->doorbell_stride_u32; qpair->sq_tdbl = doorbell_base + (2 * qpair->id + 0) * pctrlr->doorbell_stride_u32;
qpair->cq_hdbl = doorbell_base + (2 * qpair->id + 1) * ctrlr->doorbell_stride_u32; qpair->cq_hdbl = doorbell_base + (2 * qpair->id + 1) * pctrlr->doorbell_stride_u32;
/* /*
* Reserve space for all of the trackers in a single allocation. * Reserve space for all of the trackers in a single allocation.
@@ -1174,6 +1212,8 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
} }
const struct spdk_nvme_transport spdk_nvme_transport_pcie = { const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
.ctrlr_size = sizeof(struct nvme_pcie_ctrlr),
.ctrlr_construct = nvme_pcie_ctrlr_construct, .ctrlr_construct = nvme_pcie_ctrlr_construct,
.ctrlr_destruct = nvme_pcie_ctrlr_destruct, .ctrlr_destruct = nvme_pcie_ctrlr_destruct,

View File

@@ -60,8 +60,6 @@ __thread int nvme_thread_ioq_index = -1;
static int static int
ut_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle) ut_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{ {
ctrlr->regs = &g_ut_nvme_regs;
return 0; return 0;
} }
@@ -89,7 +87,7 @@ static int
ut_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value) ut_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{ {
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4); SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
*(uint32_t *)((uintptr_t)ctrlr->regs + offset) = value; *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
return 0; return 0;
} }
@@ -97,7 +95,7 @@ static int
ut_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value) ut_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{ {
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8); SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
*(uint64_t *)((uintptr_t)ctrlr->regs + offset) = value; *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
return 0; return 0;
} }
@@ -105,7 +103,7 @@ static int
ut_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value) ut_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{ {
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4); SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
*value = *(uint32_t *)((uintptr_t)ctrlr->regs + offset); *value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
return 0; return 0;
} }
@@ -113,7 +111,7 @@ static int
ut_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value) ut_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{ {
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8); SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
*value = *(uint64_t *)((uintptr_t)ctrlr->regs + offset); *value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
return 0; return 0;
} }

View File

@@ -217,11 +217,9 @@ static const struct spdk_nvme_transport nvme_qpair_ut_transport = {
static void static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair, prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr *ctrlr)
struct spdk_nvme_registers *regs)
{ {
memset(ctrlr, 0, sizeof(*ctrlr)); memset(ctrlr, 0, sizeof(*ctrlr));
ctrlr->regs = regs;
ctrlr->transport = &nvme_qpair_ut_transport; ctrlr->transport = &nvme_qpair_ut_transport;
TAILQ_INIT(&ctrlr->free_io_qpairs); TAILQ_INIT(&ctrlr->free_io_qpairs);
TAILQ_INIT(&ctrlr->active_io_qpairs); TAILQ_INIT(&ctrlr->active_io_qpairs);
@@ -282,9 +280,8 @@ test3(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req; struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request_null(expected_success_callback, NULL); req = nvme_allocate_request_null(expected_success_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
@@ -305,10 +302,9 @@ test4(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req; struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
char payload[4096]; char payload[4096];
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL); req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
@@ -335,7 +331,6 @@ test_sgl_req(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req; struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_payload payload = {}; struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL; struct nvme_tracker *sgl_tr = NULL;
uint64_t i; uint64_t i;
@@ -346,7 +341,7 @@ test_sgl_req(void)
payload.u.sgl.next_sge_fn = nvme_request_next_sge; payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req; payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -358,7 +353,7 @@ test_sgl_req(void)
CU_ASSERT(qpair.sq_tail == 0); CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair); cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -373,7 +368,7 @@ test_sgl_req(void)
fail_next_sge = false; fail_next_sge = false;
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -385,7 +380,7 @@ test_sgl_req(void)
CU_ASSERT(qpair.sq_tail == 0); CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair); cleanup_submit_request_test(&qpair);
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -414,7 +409,6 @@ test_hw_sgl_req(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req; struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_payload payload = {}; struct nvme_payload payload = {};
struct nvme_tracker *sgl_tr = NULL; struct nvme_tracker *sgl_tr = NULL;
uint64_t i; uint64_t i;
@@ -425,7 +419,7 @@ test_hw_sgl_req(void)
payload.u.sgl.next_sge_fn = nvme_request_next_sge; payload.u.sgl.next_sge_fn = nvme_request_next_sge;
payload.u.sgl.cb_arg = &io_req; payload.u.sgl.cb_arg = &io_req;
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -447,7 +441,7 @@ test_hw_sgl_req(void)
cleanup_submit_request_test(&qpair); cleanup_submit_request_test(&qpair);
nvme_free_request(req); nvme_free_request(req);
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req); req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
req->cmd.opc = SPDK_NVME_OPC_WRITE; req->cmd.opc = SPDK_NVME_OPC_WRITE;
@@ -479,10 +473,9 @@ test_ctrlr_failed(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req; struct nvme_request *req;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
char payload[4096]; char payload[4096];
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL); req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
SPDK_CU_ASSERT_FATAL(req != NULL); SPDK_CU_ASSERT_FATAL(req != NULL);
@@ -519,10 +512,9 @@ static void test_nvme_qpair_fail(void)
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct nvme_request *req = NULL; struct nvme_request *req = NULL;
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_tracker *tr_temp; struct nvme_tracker *tr_temp;
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
tr_temp = LIST_FIRST(&qpair.free_tr); tr_temp = LIST_FIRST(&qpair.free_tr);
SPDK_CU_ASSERT_FATAL(tr_temp != NULL); SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
@@ -550,9 +542,8 @@ static void test_nvme_qpair_process_completions(void)
{ {
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
qpair.is_enabled = false; qpair.is_enabled = false;
qpair.ctrlr->is_resetting = true; qpair.ctrlr->is_resetting = true;
@@ -566,9 +557,8 @@ test_nvme_qpair_process_completions_limit(void)
{ {
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs); prepare_submit_request_test(&qpair, &ctrlr);
qpair.is_enabled = true; qpair.is_enabled = true;
/* Insert 4 entries into the completion queue */ /* Insert 4 entries into the completion queue */
@@ -597,11 +587,9 @@ static void test_nvme_qpair_destroy(void)
{ {
struct spdk_nvme_qpair qpair = {}; struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_ctrlr ctrlr = {}; struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_tracker *tr_temp; struct nvme_tracker *tr_temp;
memset(&ctrlr, 0, sizeof(ctrlr)); memset(&ctrlr, 0, sizeof(ctrlr));
ctrlr.regs = &regs;
TAILQ_INIT(&ctrlr.free_io_qpairs); TAILQ_INIT(&ctrlr.free_io_qpairs);
TAILQ_INIT(&ctrlr.active_io_qpairs); TAILQ_INIT(&ctrlr.active_io_qpairs);