nvme/pcie: make sure sq and cq are physically contiguous

The physical memory contiguity guarantee of spdk_dma_zmalloc() is
about to be removed. For hardware rings that require physical memory
or IOVA contiguity, we now enforce hugepage alignment and size
restrictions to make sure each ring occupies only a single hugepage.
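
For context, a minimal sketch (not part of this commit) of the arithmetic
behind the single-hugepage cap. The entry sizes match the NVMe spec: a
submission queue entry is 64 bytes and a completion queue entry is 16 bytes,
so a 2MB hugepage bounds both rings at 0x200000 / 64 = 32768 entries:

#include <stdio.h>

#define HUGEPAGE_SIZE	0x200000ULL	/* one 2MB hugepage */
#define SQ_ENTRY_SIZE	64ULL		/* sizeof(struct spdk_nvme_cmd) */
#define CQ_ENTRY_SIZE	16ULL		/* sizeof(struct spdk_nvme_cpl) */

int main(void)
{
	/* The larger entry size (the SQ's) bounds both rings. */
	unsigned long long max_entries = HUGEPAGE_SIZE /
		(SQ_ENTRY_SIZE > CQ_ENTRY_SIZE ? SQ_ENTRY_SIZE : CQ_ENTRY_SIZE);

	printf("MAX_IO_QUEUE_ENTRIES = %llu\n", max_entries);	/* 32768 */
	return 0;
}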

Change-Id: Iebaf1e7b701d676be1f04a9189201c5d89dad395
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/418547
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Dariusz Stojaczyk 2018-07-08 22:06:00 +02:00 committed by Jim Harris
parent fdec444aa8
commit 564db67415
3 changed files with 16 additions and 4 deletions

lib/nvme/nvme_ctrlr.c

@@ -1786,6 +1786,7 @@ nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_reg
 	ctrlr->page_size = ctrlr->min_page_size;
 
 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
+	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
 
 	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);

lib/nvme/nvme_internal.h

@@ -126,6 +126,13 @@ extern pid_t g_spdk_nvme_pid;
 #define DEFAULT_ADMIN_QUEUE_REQUESTS	(32)
 #define DEFAULT_IO_QUEUE_REQUESTS	(512)
 
+/* We want to fit submission and completion rings each in a single 2MB
+ * hugepage to ensure physical address contiguity.
+ */
+#define MAX_IO_QUEUE_ENTRIES		(0x200000 / spdk_max( \
+					sizeof(struct spdk_nvme_cmd), \
+					sizeof(struct spdk_nvme_cpl)))
+
 enum nvme_payload_type {
 	NVME_PAYLOAD_TYPE_INVALID = 0,
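
The cap above relies on the fixed NVMe entry sizes. A compile-time check
(an illustration only, not part of the commit; SPDK itself makes equivalent
size assertions in nvme_spec.h) could look like this in C11:

#include <assert.h>
#include "spdk/nvme_spec.h"

/* A 64-byte SQ entry dominates the 16-byte CQ entry, so the cap
 * works out to 0x200000 / 64 == 32768 entries per ring at most. */
static_assert(sizeof(struct spdk_nvme_cmd) == 64, "SQ entry must be 64 bytes");
static_assert(sizeof(struct spdk_nvme_cpl) == 16, "CQ entry must be 16 bytes");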

lib/nvme/nvme_pcie.c

@@ -971,7 +971,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 	volatile uint32_t	*doorbell_base;
 	uint64_t		offset;
 	uint16_t		num_trackers;
-	size_t			page_size = sysconf(_SC_PAGESIZE);
+	size_t			page_align = 0x200000;
 
 	/*
 	 * Limit the maximum number of completions to return per call to prevent wraparound,
@@ -993,15 +993,19 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 	/* cmd and cpl rings must be aligned on page size boundaries. */
 	if (ctrlr->opts.use_cmb_sqs) {
 		if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
-					      page_size, &offset) == 0) {
+					      sysconf(_SC_PAGESIZE), &offset) == 0) {
 			pqpair->cmd = pctrlr->cmb_bar_virt_addr + offset;
 			pqpair->cmd_bus_addr = pctrlr->cmb_bar_phys_addr + offset;
 			pqpair->sq_in_cmb = true;
 		}
 	}
+
+	/* To ensure physical address contiguity we make each ring occupy
+	 * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
+	 */
 	if (pqpair->sq_in_cmb == false) {
 		pqpair->cmd = spdk_dma_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
-					       page_size,
+					       page_align,
 					       &pqpair->cmd_bus_addr);
 		if (pqpair->cmd == NULL) {
 			SPDK_ERRLOG("alloc qpair_cmd failed\n");
@@ -1010,7 +1014,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 	}
 
 	pqpair->cpl = spdk_dma_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
-				       page_size,
+				       page_align,
 				       &pqpair->cpl_bus_addr);
 	if (pqpair->cpl == NULL) {
 		SPDK_ERRLOG("alloc qpair_cpl failed\n");