nvme: add ctrlr construct/destruct to transport

Change-Id: I66842497a02bdb586d38ddc4a38d5b444a9d5dad
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-10-13 16:08:22 -07:00
parent 03aead3903
commit a5790100f2
5 changed files with 218 additions and 240 deletions

View File

@ -51,7 +51,7 @@ nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register
&csts->raw);
}
static int
int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
return ctrlr->transport->ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
@ -93,20 +93,6 @@ nvme_ctrlr_set_aqa(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_aqa_regi
aqa->raw);
}
/* Read the CMBLOC (Controller Memory Buffer Location) register into *cmbloc.
 * Returns the transport's ctrlr_get_reg_4() result (0 on success). */
static int
nvme_ctrlr_get_cmbloc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbloc_register *cmbloc)
{
return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
&cmbloc->raw);
}
/* Read the CMBSZ (Controller Memory Buffer Size) register into *cmbsz.
 * Returns the transport's ctrlr_get_reg_4() result (0 on success). */
static int
nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
&cmbsz->raw);
}
void
spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts)
{
@ -994,141 +980,6 @@ nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
return 0;
}
/* Best-effort mapping of the controller memory buffer (CMB).
 * On success, fills in ctrlr->cmb_bar_virt_addr/cmb_bar_phys_addr/cmb_size/
 * cmb_current_offset.  On any failure the CMB is left unmapped
 * (cmb_bar_virt_addr = NULL) and use_cmb_sqs is disabled; no error is
 * reported to the caller. */
static void
nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
void *addr;
uint32_t bir;
union spdk_nvme_cmbsz_register cmbsz;
union spdk_nvme_cmbloc_register cmbloc;
uint64_t size, unit_size, offset, bar_size, bar_phys_addr;
if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz) ||
nvme_ctrlr_get_cmbloc(ctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get registers failed\n");
goto exit;
}
/* CMBSZ.SZ == 0: controller does not implement a CMB. */
if (!cmbsz.bits.sz)
goto exit;
bir = cmbloc.bits.bir;
/* Values 0 2 3 4 5 are valid for BAR */
if (bir > 5 || bir == 1)
goto exit;
/* unit size for 4KB/64KB/1MB/16MB/256MB/4GB/64GB */
unit_size = (uint64_t)1 << (12 + 4 * cmbsz.bits.szu);
/* controller memory buffer size in Bytes */
size = unit_size * cmbsz.bits.sz;
/* controller memory buffer offset from BAR in Bytes */
offset = unit_size * cmbloc.bits.ofst;
rc = spdk_pci_device_map_bar(ctrlr->devhandle, bir, &addr,
&bar_phys_addr, &bar_size);
if ((rc != 0) || addr == NULL) {
goto exit;
}
/* The CMB region must lie entirely within the mapped BAR. */
if (offset > bar_size) {
goto exit;
}
if (size > bar_size - offset) {
goto exit;
}
ctrlr->cmb_bar_virt_addr = addr;
ctrlr->cmb_bar_phys_addr = bar_phys_addr;
ctrlr->cmb_size = size;
ctrlr->cmb_current_offset = offset;
/* CMBSZ.SQS == 0: submission queues may not be placed in the CMB. */
if (!cmbsz.bits.sqs) {
ctrlr->opts.use_cmb_sqs = false;
}
return;
exit:
ctrlr->cmb_bar_virt_addr = NULL;
ctrlr->opts.use_cmb_sqs = false;
return;
}
/* Unmap the controller memory buffer, if one was mapped.
 * Returns 0 on success (or when no CMB was mapped), -EIO if CMBLOC cannot
 * be re-read to recover the BAR index, or spdk_pci_device_unmap_bar()'s
 * error code. */
static int
nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
union spdk_nvme_cmbloc_register cmbloc;
void *addr = ctrlr->cmb_bar_virt_addr;
if (addr) {
if (nvme_ctrlr_get_cmbloc(ctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cmbloc() failed\n");
return -EIO;
}
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, cmbloc.bits.bir, addr);
}
return rc;
}
/* Carve out 'length' bytes from the controller memory buffer at an
 * 'aligned'-byte boundary.  NOTE(review): the rounding below only works when
 * 'aligned' is a nonzero power of two -- confirm callers obey this.
 * On success stores the CMB-relative offset in *offset and returns 0;
 * returns -1 when the remaining CMB space is insufficient. */
int
nvme_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
uint64_t *offset)
{
uint64_t round_offset;
round_offset = ctrlr->cmb_current_offset;
/* Round the current offset up to the requested alignment. */
round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1);
if (round_offset + length > ctrlr->cmb_size)
return -1;
*offset = round_offset;
ctrlr->cmb_current_offset = round_offset + length;
return 0;
}
/* Map BAR 0 (the NVMe controller register space) and then attempt to map the
 * optional controller memory buffer.  Returns 0 on success, -1 if the
 * register BAR could not be mapped; a CMB mapping failure is not fatal. */
static int
nvme_ctrlr_allocate_bars(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;

	rc = spdk_pci_device_map_bar(ctrlr->devhandle, 0, &addr,
				     &phys_addr, &size);
	ctrlr->regs = (volatile struct spdk_nvme_registers *)addr;
	if ((ctrlr->regs == NULL) || (rc != 0)) {
		/* Fix: the log message previously named "nvme_pcicfg_map_bar",
		 * which is not the function actually called here. */
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with rc %d or bar %p\n",
			    rc, ctrlr->regs);
		return -1;
	}

	/* Best-effort: on failure the CMB simply stays unmapped. */
	nvme_ctrlr_map_cmb(ctrlr);
	return 0;
}
/* Release the BARs mapped by nvme_ctrlr_allocate_bars(): first the CMB,
 * then BAR 0.  Returns 0 on success, -1 if the CMB could not be unmapped,
 * or spdk_pci_device_unmap_bar()'s error code. */
static int
nvme_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
void *addr = (void *)ctrlr->regs;
rc = nvme_ctrlr_unmap_cmb(ctrlr);
if (rc != 0) {
SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc);
return -1;
}
if (addr) {
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, 0, addr);
}
return rc;
}
static inline int
pthread_mutex_init_recursive(pthread_mutex_t *mtx)
{
@ -1150,33 +1001,22 @@ int
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
union spdk_nvme_cap_register cap;
uint32_t cmd_reg;
int status;
int rc;
nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
ctrlr->devhandle = devhandle;
ctrlr->flags = 0;
status = nvme_ctrlr_allocate_bars(ctrlr);
if (status != 0) {
return status;
rc = ctrlr->transport->ctrlr_construct(ctrlr, devhandle);
if (rc) {
return rc;
}
/* Enable PCI busmaster and disable INTx */
spdk_pci_device_cfg_read32(devhandle, &cmd_reg, 4);
cmd_reg |= 0x404;
spdk_pci_device_cfg_write32(devhandle, cmd_reg, 4);
if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
return -EIO;
}
/* Doorbell stride is 2 ^ (dstrd + 2),
* but we want multiples of 4, so drop the + 2 */
ctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;
ctrlr->min_page_size = 1 << (12 + cap.bits.mpsmin);
rc = nvme_ctrlr_construct_admin_qpair(ctrlr);
@ -1191,12 +1031,6 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
pthread_mutex_init_recursive(&ctrlr->ctrlr_lock);
/* Save the PCI address */
ctrlr->pci_addr.domain = spdk_pci_device_get_domain(devhandle);
ctrlr->pci_addr.bus = spdk_pci_device_get_bus(devhandle);
ctrlr->pci_addr.dev = spdk_pci_device_get_dev(devhandle);
ctrlr->pci_addr.func = spdk_pci_device_get_func(devhandle);
return 0;
}
@ -1224,8 +1058,9 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
nvme_qpair_destroy(&ctrlr->adminq);
nvme_ctrlr_free_bars(ctrlr);
pthread_mutex_destroy(&ctrlr->ctrlr_lock);
ctrlr->transport->ctrlr_destruct(ctrlr);
}
int

View File

@ -249,6 +249,9 @@ struct pci_id {
};
struct spdk_nvme_transport {
int (*ctrlr_construct)(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr);
int (*ctrlr_get_pci_id)(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id);
int (*ctrlr_set_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
@ -595,8 +598,7 @@ int nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);
int nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
int nvme_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
uint64_t *offset);
int nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
uint16_t num_entries,
struct spdk_nvme_ctrlr *ctrlr);

View File

@ -96,6 +96,196 @@ nvme_pcie_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64
return 0;
}
/* PCIe transport: read the CMBLOC register into *cmbloc.
 * Returns nvme_pcie_ctrlr_get_reg_4()'s result (0 on success). */
static int
nvme_pcie_ctrlr_get_cmbloc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbloc_register *cmbloc)
{
return nvme_pcie_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
&cmbloc->raw);
}
/* PCIe transport: read the CMBSZ register into *cmbsz.
 * Returns nvme_pcie_ctrlr_get_reg_4()'s result (0 on success). */
static int
nvme_pcie_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
return nvme_pcie_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
&cmbsz->raw);
}
/* PCIe transport: best-effort mapping of the controller memory buffer (CMB).
 * On success, fills in ctrlr->cmb_bar_virt_addr/cmb_bar_phys_addr/cmb_size/
 * cmb_current_offset.  On any failure the CMB is left unmapped
 * (cmb_bar_virt_addr = NULL) and use_cmb_sqs is disabled; no error is
 * reported to the caller. */
static void
nvme_pcie_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
void *addr;
uint32_t bir;
union spdk_nvme_cmbsz_register cmbsz;
union spdk_nvme_cmbloc_register cmbloc;
uint64_t size, unit_size, offset, bar_size, bar_phys_addr;
if (nvme_pcie_ctrlr_get_cmbsz(ctrlr, &cmbsz) ||
nvme_pcie_ctrlr_get_cmbloc(ctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get registers failed\n");
goto exit;
}
/* CMBSZ.SZ == 0: controller does not implement a CMB. */
if (!cmbsz.bits.sz)
goto exit;
bir = cmbloc.bits.bir;
/* Values 0 2 3 4 5 are valid for BAR */
if (bir > 5 || bir == 1)
goto exit;
/* unit size for 4KB/64KB/1MB/16MB/256MB/4GB/64GB */
unit_size = (uint64_t)1 << (12 + 4 * cmbsz.bits.szu);
/* controller memory buffer size in Bytes */
size = unit_size * cmbsz.bits.sz;
/* controller memory buffer offset from BAR in Bytes */
offset = unit_size * cmbloc.bits.ofst;
rc = spdk_pci_device_map_bar(ctrlr->devhandle, bir, &addr,
&bar_phys_addr, &bar_size);
if ((rc != 0) || addr == NULL) {
goto exit;
}
/* The CMB region must lie entirely within the mapped BAR. */
if (offset > bar_size) {
goto exit;
}
if (size > bar_size - offset) {
goto exit;
}
ctrlr->cmb_bar_virt_addr = addr;
ctrlr->cmb_bar_phys_addr = bar_phys_addr;
ctrlr->cmb_size = size;
ctrlr->cmb_current_offset = offset;
/* CMBSZ.SQS == 0: submission queues may not be placed in the CMB. */
if (!cmbsz.bits.sqs) {
ctrlr->opts.use_cmb_sqs = false;
}
return;
exit:
ctrlr->cmb_bar_virt_addr = NULL;
ctrlr->opts.use_cmb_sqs = false;
return;
}
/* PCIe transport: unmap the controller memory buffer, if one was mapped.
 * Returns 0 on success (or when no CMB was mapped), -EIO if CMBLOC cannot
 * be re-read to recover the BAR index, or spdk_pci_device_unmap_bar()'s
 * error code. */
static int
nvme_pcie_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
union spdk_nvme_cmbloc_register cmbloc;
void *addr = ctrlr->cmb_bar_virt_addr;
if (addr) {
if (nvme_pcie_ctrlr_get_cmbloc(ctrlr, &cmbloc)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cmbloc() failed\n");
return -EIO;
}
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, cmbloc.bits.bir, addr);
}
return rc;
}
/* PCIe transport: carve out 'length' bytes from the controller memory buffer
 * at an 'aligned'-byte boundary.  NOTE(review): the rounding below only works
 * when 'aligned' is a nonzero power of two -- confirm callers obey this.
 * On success stores the CMB-relative offset in *offset and returns 0;
 * returns -1 when the remaining CMB space is insufficient. */
static int
nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
uint64_t *offset)
{
uint64_t round_offset;
round_offset = ctrlr->cmb_current_offset;
/* Round the current offset up to the requested alignment. */
round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1);
if (round_offset + length > ctrlr->cmb_size)
return -1;
*offset = round_offset;
ctrlr->cmb_current_offset = round_offset + length;
return 0;
}
/* PCIe transport: map BAR 0 (the NVMe register space) and then attempt to
 * map the optional controller memory buffer.  Returns 0 on success, -1 if
 * the register BAR could not be mapped; a CMB mapping failure is not fatal. */
static int
nvme_pcie_ctrlr_allocate_bars(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;

	rc = spdk_pci_device_map_bar(ctrlr->devhandle, 0, &addr,
				     &phys_addr, &size);
	ctrlr->regs = (volatile struct spdk_nvme_registers *)addr;
	if ((ctrlr->regs == NULL) || (rc != 0)) {
		/* Fix: the log message previously named "nvme_pcicfg_map_bar",
		 * which is not the function actually called here. */
		SPDK_ERRLOG("spdk_pci_device_map_bar failed with rc %d or bar %p\n",
			    rc, ctrlr->regs);
		return -1;
	}

	/* Best-effort: on failure the CMB simply stays unmapped. */
	nvme_pcie_ctrlr_map_cmb(ctrlr);
	return 0;
}
static int
nvme_pcie_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
void *addr = (void *)ctrlr->regs;
rc = nvme_pcie_ctrlr_unmap_cmb(ctrlr);
if (rc != 0) {
SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc);
return -1;
}
if (addr) {
rc = spdk_pci_device_unmap_bar(ctrlr->devhandle, 0, addr);
}
return rc;
}
/* PCIe implementation of the transport ctrlr_construct hook: map the BARs,
 * enable PCI bus mastering, cache the doorbell stride from CAP, and record
 * the device's PCI address.  Returns 0 on success, a negative errno or the
 * BAR-mapping error code on failure. */
static int
nvme_pcie_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
union spdk_nvme_cap_register cap;
uint32_t cmd_reg;
int rc;
rc = nvme_pcie_ctrlr_allocate_bars(ctrlr);
if (rc != 0) {
return rc;
}
/* Enable PCI busmaster and disable INTx */
spdk_pci_device_cfg_read32(devhandle, &cmd_reg, 4);
cmd_reg |= 0x404;
spdk_pci_device_cfg_write32(devhandle, cmd_reg, 4);
if (nvme_ctrlr_get_cap(ctrlr, &cap)) {
SPDK_TRACELOG(SPDK_TRACE_NVME, "get_cap() failed\n");
return -EIO;
}
/* Doorbell stride is 2 ^ (dstrd + 2),
 * but we want multiples of 4, so drop the + 2 */
ctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;
/* Save the PCI address */
ctrlr->pci_addr.domain = spdk_pci_device_get_domain(devhandle);
ctrlr->pci_addr.bus = spdk_pci_device_get_bus(devhandle);
ctrlr->pci_addr.dev = spdk_pci_device_get_dev(devhandle);
ctrlr->pci_addr.func = spdk_pci_device_get_func(devhandle);
return 0;
}
/* PCIe implementation of the transport ctrlr_destruct hook: release the
 * mapped BARs.  The return value of nvme_pcie_ctrlr_free_bars() is ignored
 * because this hook has no way to report failure. */
static void
nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
nvme_pcie_ctrlr_free_bars(ctrlr);
}
static void
nvme_qpair_construct_tracker(struct nvme_tracker *tr, uint16_t cid, uint64_t phys_addr)
{
@ -152,8 +342,8 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
/* cmd and cpl rings must be aligned on 4KB boundaries. */
if (ctrlr->opts.use_cmb_sqs) {
if (nvme_ctrlr_alloc_cmb(ctrlr, qpair->num_entries * sizeof(struct spdk_nvme_cmd),
0x1000, &offset) == 0) {
if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, qpair->num_entries * sizeof(struct spdk_nvme_cmd),
0x1000, &offset) == 0) {
qpair->cmd = ctrlr->cmb_bar_virt_addr + offset;
qpair->cmd_bus_addr = ctrlr->cmb_bar_phys_addr + offset;
qpair->sq_in_cmb = true;
@ -888,6 +1078,9 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
}
const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
.ctrlr_construct = nvme_pcie_ctrlr_construct,
.ctrlr_destruct = nvme_pcie_ctrlr_destruct,
.ctrlr_get_pci_id = nvme_pcie_ctrlr_get_pci_id,
.ctrlr_set_reg_4 = nvme_pcie_ctrlr_set_reg_4,

View File

@ -51,45 +51,23 @@ static uint16_t g_pci_vendor_id;
static uint16_t g_pci_device_id;
static uint16_t g_pci_subvendor_id;
static uint16_t g_pci_subdevice_id;
static uint16_t g_pci_domain;
static uint8_t g_pci_bus;
static uint8_t g_pci_dev;
static uint8_t g_pci_func;
uint64_t g_ut_tsc = 0;
struct spdk_nvme_registers g_ut_nvme_regs = {};
__thread int nvme_thread_ioq_index = -1;
int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
static int
ut_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
*mapped_addr = &g_ut_nvme_regs;
*phys_addr = (uintptr_t)&g_ut_nvme_regs;
*size = sizeof(g_ut_nvme_regs);
ctrlr->regs = &g_ut_nvme_regs;
return 0;
}
int
spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
static void
ut_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
/* Unit-test stub: every PCI config-space read reports an all-ones value
 * and succeeds. */
int
spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
uint32_t offset)
{
*value = 0xFFFFFFFFu;
return 0;
}
/* Unit-test stub: PCI config-space writes are discarded and always succeed. */
int
spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
uint32_t offset)
{
return 0;
}
static int
@ -158,6 +136,9 @@ ut_qpair_reset(struct spdk_nvme_qpair *qpair)
}
static const struct spdk_nvme_transport nvme_ctrlr_ut_transport = {
.ctrlr_construct = ut_ctrlr_construct,
.ctrlr_destruct = ut_ctrlr_destruct,
.ctrlr_get_pci_id = ut_ctrlr_get_pci_id,
.ctrlr_set_reg_4 = ut_ctrlr_set_reg_4,
@ -172,36 +153,6 @@ static const struct spdk_nvme_transport nvme_ctrlr_ut_transport = {
.qpair_reset = ut_qpair_reset,
};
/* Unit-test stub: return the globally configured fake PCI domain. */
uint16_t
spdk_pci_device_get_domain(struct spdk_pci_device *dev)
{
return g_pci_domain;
}
/* Unit-test stub: return the globally configured fake PCI bus number. */
uint8_t
spdk_pci_device_get_bus(struct spdk_pci_device *dev)
{
return g_pci_bus;
}
/* Unit-test stub: return the globally configured fake PCI device number. */
uint8_t
spdk_pci_device_get_dev(struct spdk_pci_device *dev)
{
return g_pci_dev;
}
/* Unit-test stub: return the globally configured fake PCI function number. */
uint8_t
spdk_pci_device_get_func(struct spdk_pci_device *dev)
{
return g_pci_func;
}
/* Unit-test stub: treat every device as matching the given PCI address. */
bool
spdk_pci_device_compare_addr(struct spdk_pci_device *dev, struct spdk_pci_addr *addr)
{
return true;
}
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
uint16_t num_entries,
struct spdk_nvme_ctrlr *ctrlr)
@ -1218,6 +1169,7 @@ test_nvme_ctrlr_set_supported_features(void)
CU_ASSERT(res == true);
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_ctrlr_alloc_cmb(void)
{
@ -1245,6 +1197,7 @@ test_nvme_ctrlr_alloc_cmb(void)
rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
CU_ASSERT(rc == -1);
}
#endif
int main(int argc, char **argv)
{
@ -1284,8 +1237,10 @@ int main(int argc, char **argv)
test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
test_nvme_ctrlr_set_supported_features) == NULL
#if 0 /* TODO: move to PCIe-specific unit test */
|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
test_nvme_ctrlr_alloc_cmb) == NULL
#endif
) {
CU_cleanup_registry();
return CU_get_error();

View File

@ -182,13 +182,6 @@ nvme_request_remove_child(struct nvme_request *parent,
TAILQ_REMOVE(&parent->children, child, child_tailq);
}
/* Unit-test stub: CMB allocation always fails, forcing queue memory to be
 * allocated from regular host memory instead of the controller buffer. */
int
nvme_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
uint64_t *offset)
{
return -1;
}
static int
ut_qpair_construct(struct spdk_nvme_qpair *qpair)
{