nvme: move ctrlr allocation to transport

Make the transport ctrlr_construct callback responsible for allocating
its own controller.

Change-Id: I5102ee233df23e27349410ed063cde8bfdce4c67
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp <daniel.verkamp@intel.com>
Date:   2016-10-18 12:50:43 -07:00
Parent: eaecf47ebd
Commit: 823958551b
7 changed files with 59 additions and 84 deletions
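The crux of the change: the generic layer used to size the controller allocation via transport->ctrlr_size and hand the zeroed memory to the transport. Now the transport callback allocates the structure that embeds the generic spdk_nvme_ctrlr, fills in the transport and devhandle fields, runs the generic nvme_ctrlr_construct(), and returns a pointer to the embedded controller; the matching ctrlr_destruct callback frees the memory. A minimal sketch of the new contract (the my_transport names are hypothetical, not part of this commit):

	struct my_transport_ctrlr {
		struct spdk_nvme_ctrlr	ctrlr;	/* embedded generic controller */
		/* transport-specific state follows */
	};

	static struct spdk_nvme_ctrlr *
	my_transport_ctrlr_construct(void *devhandle)
	{
		struct my_transport_ctrlr *tctrlr;

		tctrlr = spdk_zmalloc(sizeof(*tctrlr), 64, NULL);
		if (tctrlr == NULL) {
			return NULL;
		}

		tctrlr->ctrlr.transport = &spdk_nvme_transport_my;	/* hypothetical table */
		tctrlr->ctrlr.devhandle = devhandle;

		/* generic init no longer takes devhandle; the transport stashes it
		 * on the ctrlr beforehand */
		if (nvme_ctrlr_construct(&tctrlr->ctrlr) != 0) {
			spdk_free(tctrlr);
			return NULL;
		}

		return &tctrlr->ctrlr;
	}

	static void
	my_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
	{
		/* the transport owns the allocation, so it frees it; the cast is
		 * valid here because ctrlr is the first member of this sketch */
		spdk_free((struct my_transport_ctrlr *)ctrlr);
	}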


@@ -49,24 +49,10 @@ nvme_attach(void *devhandle)
 {
 	const struct spdk_nvme_transport *transport;
 	struct spdk_nvme_ctrlr *ctrlr;
-	int status;
-	uint64_t phys_addr = 0;
 
 	transport = &spdk_nvme_transport_pcie;
 
-	ctrlr = spdk_zmalloc(transport->ctrlr_size, 64, &phys_addr);
-	if (ctrlr == NULL) {
-		SPDK_ERRLOG("could not allocate ctrlr\n");
-		return NULL;
-	}
-
-	ctrlr->transport = transport;
-
-	status = nvme_ctrlr_construct(ctrlr, devhandle);
-	if (status != 0) {
-		spdk_free(ctrlr);
-		return NULL;
-	}
+	ctrlr = transport->ctrlr_construct(devhandle);
 
 	return ctrlr;
 }
@@ -76,9 +62,8 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
 {
 	pthread_mutex_lock(&g_spdk_nvme_driver->lock);
 
-	nvme_ctrlr_destruct(ctrlr);
 	TAILQ_REMOVE(&g_spdk_nvme_driver->attached_ctrlrs, ctrlr, tailq);
-	spdk_free(ctrlr);
+	nvme_ctrlr_destruct(ctrlr);
 
 	pthread_mutex_unlock(&g_spdk_nvme_driver->lock);
 	return 0;
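With allocation owned by the transport, the generic detach path no longer calls spdk_free() itself: nvme_ctrlr_destruct() now dispatches to the transport's ctrlr_destruct callback, which releases the memory (see the nvme_pcie_ctrlr_destruct hunk below). The controller is unlinked from the attached list before it is destructed, since the tailq node lives inside the memory that destruct frees.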


@@ -998,20 +998,14 @@ pthread_mutex_init_recursive(pthread_mutex_t *mtx)
 }
 
 int
-nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 {
 	union spdk_nvme_cap_register cap;
 	int rc;
 
 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
-	ctrlr->devhandle = devhandle;
 	ctrlr->flags = 0;
 
-	rc = ctrlr->transport->ctrlr_construct(ctrlr, devhandle);
-	if (rc) {
-		return rc;
-	}
-
 	if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
 		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
 		return -EIO;


@@ -249,13 +249,7 @@ struct pci_id {
 };
 
 struct spdk_nvme_transport {
-	/*
-	 * Size of the transport-specific extended spdk_nvme_ctrlr structure,
-	 * which must contain spdk_nvme_ctrlr as the first element.
-	 */
-	size_t ctrlr_size;
-
-	int (*ctrlr_construct)(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
+	struct spdk_nvme_ctrlr *(*ctrlr_construct)(void *devhandle);
 	void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr);
 
 	int (*ctrlr_get_pci_id)(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id);
@@ -572,7 +566,7 @@ int nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
 			spdk_nvme_cmd_cb cb_fn, void *cb_arg);
 void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
 
-int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
+int	nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
 void	nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
 int	nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
 int	nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);


@@ -59,13 +59,12 @@ struct nvme_pcie_ctrlr {
 	/** stride in uint32_t units between doorbell registers (1 = 4 bytes, 2 = 8 bytes, ...) */
 	uint32_t doorbell_stride_u32;
 };
-SPDK_STATIC_ASSERT(offsetof(struct nvme_pcie_ctrlr, ctrlr) == 0, "ctrlr must be first field");
 
 static inline struct nvme_pcie_ctrlr *
 nvme_pcie_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
 {
 	assert(ctrlr->transport == &spdk_nvme_transport_pcie);
-	return (struct nvme_pcie_ctrlr *)ctrlr;
+	return (struct nvme_pcie_ctrlr *)((uintptr_t)ctrlr - offsetof(struct nvme_pcie_ctrlr, ctrlr));
 }
 
 static int
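Because the embedded spdk_nvme_ctrlr no longer has to sit at offset 0 of the transport structure (hence the dropped SPDK_STATIC_ASSERT), the accessor now recovers the enclosing struct with container-of arithmetic rather than a plain cast. The idiom in isolation, with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>

	struct inner { int x; };

	struct outer {
		int		before;	/* inner need not be the first member */
		struct inner	in;
	};

	/* Recover the enclosing struct from a pointer to its member. */
	static inline struct outer *
	outer_from_inner(struct inner *p)
	{
		return (struct outer *)((uintptr_t)p - offsetof(struct outer, in));
	}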
@@ -279,27 +278,38 @@ nvme_pcie_ctrlr_free_bars(struct nvme_pcie_ctrlr *pctrlr)
 	return rc;
 }
 
-static int
-nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+static struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(void *devhandle)
 {
-	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
+	struct spdk_pci_device *pci_dev = devhandle;
+	struct nvme_pcie_ctrlr *pctrlr;
 	union spdk_nvme_cap_register cap;
 	uint32_t cmd_reg;
 	int rc;
 
+	pctrlr = spdk_zmalloc(sizeof(struct nvme_pcie_ctrlr), 64, NULL);
+	if (pctrlr == NULL) {
+		SPDK_ERRLOG("could not allocate ctrlr\n");
+		return NULL;
+	}
+
+	pctrlr->ctrlr.transport = &spdk_nvme_transport_pcie;
+	pctrlr->ctrlr.devhandle = devhandle;
+
 	rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
 	if (rc != 0) {
-		return rc;
+		spdk_free(pctrlr);
+		return NULL;
 	}
 
 	/* Enable PCI busmaster and disable INTx */
-	spdk_pci_device_cfg_read32(devhandle, &cmd_reg, 4);
+	spdk_pci_device_cfg_read32(pci_dev, &cmd_reg, 4);
 	cmd_reg |= 0x404;
-	spdk_pci_device_cfg_write32(devhandle, cmd_reg, 4);
+	spdk_pci_device_cfg_write32(pci_dev, cmd_reg, 4);
 
-	if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
+	if (nvme_ctrlr_get_cap(&pctrlr->ctrlr, &cap)) {
 		SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
-		return -EIO;
+		spdk_free(pctrlr);
+		return NULL;
 	}
 
 	/* Doorbell stride is 2 ^ (dstrd + 2),
@@ -307,12 +317,18 @@ nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
 	pctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;
 
 	/* Save the PCI address */
-	ctrlr->pci_addr.domain = spdk_pci_device_get_domain(devhandle);
-	ctrlr->pci_addr.bus = spdk_pci_device_get_bus(devhandle);
-	ctrlr->pci_addr.dev = spdk_pci_device_get_dev(devhandle);
-	ctrlr->pci_addr.func = spdk_pci_device_get_func(devhandle);
+	pctrlr->ctrlr.pci_addr.domain = spdk_pci_device_get_domain(pci_dev);
+	pctrlr->ctrlr.pci_addr.bus = spdk_pci_device_get_bus(pci_dev);
+	pctrlr->ctrlr.pci_addr.dev = spdk_pci_device_get_dev(pci_dev);
+	pctrlr->ctrlr.pci_addr.func = spdk_pci_device_get_func(pci_dev);
 
-	return 0;
+	rc = nvme_ctrlr_construct(&pctrlr->ctrlr);
+	if (rc != 0) {
+		spdk_free(pctrlr);
+		return NULL;
+	}
+
+	return &pctrlr->ctrlr;
 }
 
 static void
@@ -321,6 +337,7 @@ nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
 
 	nvme_pcie_ctrlr_free_bars(pctrlr);
+	spdk_free(pctrlr);
 }
 
 static void
@@ -1212,8 +1229,6 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 }
 
 const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
-	.ctrlr_size = sizeof(struct nvme_pcie_ctrlr),
-
 	.ctrlr_construct = nvme_pcie_ctrlr_construct,
 	.ctrlr_destruct = nvme_pcie_ctrlr_destruct,
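The .ctrlr_size field disappears from the transport table because the generic layer no longer performs the allocation; each transport already knows the size of its own controller structure.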


@@ -50,12 +50,6 @@ spdk_pci_enumerate(enum spdk_pci_device_type type,
 	return -1;
 }
 
-int
-nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
-{
-	return 0;
-}
-
 void
 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 {


@@ -57,10 +57,9 @@ struct spdk_nvme_registers g_ut_nvme_regs = {};
 __thread int	nvme_thread_ioq_index = -1;
 
-static int
-ut_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+static struct spdk_nvme_ctrlr *ut_ctrlr_construct(void *devhandle)
 {
-	return 0;
+	return NULL;
 }
 
 static void
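The unit-test stub can simply return NULL here: after this change nvme_ctrlr_construct() no longer invokes transport->ctrlr_construct (compare the nvme_ctrlr.c hunk above), so the callback exists only to populate the test transport table.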
@@ -370,7 +369,7 @@ test_nvme_ctrlr_init_en_1_rdy_0(void)
 	g_ut_nvme_regs.cc.bits.en = 1;
 	g_ut_nvme_regs.csts.bits.rdy = 0;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
@@ -420,7 +419,7 @@ test_nvme_ctrlr_init_en_1_rdy_1(void)
 	g_ut_nvme_regs.cc.bits.en = 1;
 	g_ut_nvme_regs.csts.bits.rdy = 1;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
@@ -467,7 +466,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 	 */
 	g_ut_nvme_regs.cap.bits.ams = 0x0;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 
 	/*
 	 * Case 1: default round robin arbitration mechanism selected
@@ -490,7 +489,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 	/*
 	 * Case 2: weighted round robin arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
@@ -508,7 +507,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 	/*
 	 * Case 3: vendor specific arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
@@ -526,7 +525,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 	/*
 	 * Case 4: invalid arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
@@ -544,7 +543,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 	/*
 	 * Case 5: reset to default round robin arbitration mechanism
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
@@ -586,7 +585,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 	 */
 	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 
 	/*
 	 * Case 1: default round robin arbitration mechanism selected
@@ -609,7 +608,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 	/*
 	 * Case 2: weighted round robin arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
@@ -629,7 +628,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 	/*
 	 * Case 3: vendor specific arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
@@ -647,7 +646,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 	/*
 	 * Case 4: invalid arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
@@ -665,7 +664,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 	/*
 	 * Case 5: reset to weighted round robin arbitration mechanism
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
@@ -706,7 +705,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 	 */
 	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 
 	/*
 	 * Case 1: default round robin arbitration mechanism selected
@@ -729,7 +728,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 	/*
 	 * Case 2: weighted round robin arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
@@ -747,7 +746,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 	/*
 	 * Case 3: vendor specific arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
@@ -767,7 +766,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 	/*
 	 * Case 4: invalid arbitration mechanism selected
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
@@ -785,7 +784,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 	/*
 	 * Case 5: reset to vendor specific arbitration mechanism
 	 */
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
@@ -822,7 +821,7 @@ test_nvme_ctrlr_init_en_0_rdy_0(void)
 	g_ut_nvme_regs.cc.bits.en = 0;
 	g_ut_nvme_regs.csts.bits.rdy = 0;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
@@ -854,7 +853,7 @@ test_nvme_ctrlr_init_en_0_rdy_1(void)
 	g_ut_nvme_regs.cc.bits.en = 0;
 	g_ut_nvme_regs.csts.bits.rdy = 1;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
 	ctrlr.cdata.nn = 1;
 	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
 	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
@@ -885,7 +884,7 @@ setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
 {
 	ctrlr->transport = &nvme_ctrlr_ut_transport;
 
-	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr, NULL) == 0);
+	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
 
 	/* Fake out the parts of ctrlr needed for I/O qpair allocation */
 	ctrlr->opts.num_io_queues = num_io_queues;


@@ -60,12 +60,6 @@ static int nvme_request_next_sge(void *cb_arg, uint64_t *address, uint32_t *length)
 	return 0;
 }
 
-int
-nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
-{
-	return 0;
-}
-
 void
 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 {