nvme: spdk_nvme_ctrlr_alloc_io_qpair extensions
Adds fields to struct spdk_nvme_io_qpair_opts that allow specifying the
locations of the memory buffers used for the submission and/or completion
queues.

By default, vaddr is set to NULL, meaning SPDK will allocate the memory to
be used. If vaddr is NULL, then paddr must be set to 0. If vaddr is
non-NULL and paddr is zero, SPDK derives the physical address for the NVMe
device; in this case the memory must be registered. If a paddr value is
non-zero, SPDK uses the vaddr and paddr as passed. SPDK assumes that the
memory passed is both virtually and physically contiguous.

If these fields are used, SPDK will NOT impose any restriction on the
number of elements in the queues. The buffer sizes are in bytes and are
used to confirm that the buffers are large enough to contain the
appropriate queue. These fields are only used by PCIe-attached NVMe
devices; they are presently ignored for other transports.

Signed-off-by: James Bergsten <jamesx.bergsten@intel.com>
Change-Id: Ibfab3939eefe48109335f43a1167082dd4865e7c
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454074
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent e58dfd5669
commit 8785d5052d
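To illustrate the new API from the caller's side, here is a minimal usage
sketch (not part of this commit; the attached controller handle ctrlr and
error handling are assumed). It uses spdk_zmalloc() so the rings are pinned,
DMA-safe, and already registered with SPDK, and leaves paddr at 0 so SPDK
derives the physical addresses itself:

    #include "spdk/nvme.h"
    #include "spdk/env.h"

    /* Hypothetical caller-side sketch: supply our own SQ/CQ rings. */
    struct spdk_nvme_io_qpair_opts opts;
    struct spdk_nvme_qpair *qpair;

    spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));

    /* Each ring must hold io_queue_size entries; these sizes are exactly
     * what spdk_nvme_ctrlr_alloc_io_qpair() validates against. */
    opts.sq.buffer_size = opts.io_queue_size * sizeof(struct spdk_nvme_cmd);
    opts.cq.buffer_size = opts.io_queue_size * sizeof(struct spdk_nvme_cpl);

    /* 2 MB alignment keeps each ring inside a single hugepage, so it is
     * physically contiguous, mirroring the driver's own allocation. */
    opts.sq.vaddr = spdk_zmalloc(opts.sq.buffer_size, 0x200000, NULL,
                                 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
    opts.cq.vaddr = spdk_zmalloc(opts.cq.buffer_size, 0x200000, NULL,
                                 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);

    /* sq.paddr/cq.paddr stay 0: SPDK resolves them via spdk_vtophys().
     * Memory not allocated through the SPDK env layer would first need
     * spdk_mem_register(). */
    qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));

Because the destroy path in this commit deliberately skips user-supplied
buffers, the caller remains responsible for releasing them with spdk_free()
after spdk_nvme_ctrlr_free_io_qpair().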
include/spdk/nvme.h

@@ -953,6 +953,34 @@ struct spdk_nvme_io_qpair_opts {
      * This only applies to local PCIe devices. */
     bool delay_pcie_doorbell;
+
+    /**
+     * These fields allow specifying the memory buffers for the submission and/or
+     * completion queues.
+     * By default, vaddr is set to NULL, meaning SPDK will allocate the memory to be used.
+     * If vaddr is NULL then paddr must be set to 0.
+     * If vaddr is non-NULL, and paddr is zero, SPDK derives the physical
+     * address for the NVMe device, in this case the memory must be registered.
+     * If a paddr value is non-zero, SPDK uses the vaddr and paddr as passed.
+     * SPDK assumes that the memory passed is both virtually and physically
+     * contiguous.
+     * If these fields are used, SPDK will NOT impose any restriction
+     * on the number of elements in the queues.
+     * The buffer sizes are in number of bytes, and are used to confirm
+     * that the buffers are large enough to contain the appropriate queue.
+     * These fields are only used by PCIe attached NVMe devices. They
+     * are presently ignored for other transports.
+     */
+    struct {
+        struct spdk_nvme_cmd *vaddr;
+        uint64_t paddr;
+        uint64_t buffer_size;
+    } sq;
+    struct {
+        struct spdk_nvme_cpl *vaddr;
+        uint64_t paddr;
+        uint64_t buffer_size;
+    } cq;
 };
 
 /**
lib/nvme/nvme_ctrlr.c

@@ -248,6 +248,30 @@ spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
         opts->delay_pcie_doorbell = false;
     }
 
+    if (FIELD_OK(sq.vaddr)) {
+        opts->sq.vaddr = NULL;
+    }
+
+    if (FIELD_OK(sq.paddr)) {
+        opts->sq.paddr = 0;
+    }
+
+    if (FIELD_OK(sq.buffer_size)) {
+        opts->sq.buffer_size = 0;
+    }
+
+    if (FIELD_OK(cq.vaddr)) {
+        opts->cq.vaddr = NULL;
+    }
+
+    if (FIELD_OK(cq.paddr)) {
+        opts->cq.paddr = 0;
+    }
+
+    if (FIELD_OK(cq.buffer_size)) {
+        opts->cq.buffer_size = 0;
+    }
+
 #undef FIELD_OK
 }
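A note on the FIELD_OK checks above: defaults are written only for fields
that lie entirely within the caller-supplied opts_size, so binaries compiled
against an older, smaller spdk_nvme_io_qpair_opts are never written past the
structure they allocated. In this file the macro is defined along these
lines (shown here for context, not part of the diff):

    #define FIELD_OK(field) \
        offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size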
@@ -275,6 +299,22 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
     if (user_opts) {
         memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
+
+        /* If user passes buffers, make sure they're big enough for the requested queue size */
+        if (opts.sq.vaddr) {
+            if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
+                SPDK_ERRLOG("sq buffer size %lx is too small for sq size %lx\n",
+                            opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
+                return NULL;
+            }
+        }
+        if (opts.cq.vaddr) {
+            if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
+                SPDK_ERRLOG("cq buffer size %lx is too small for cq size %lx\n",
+                            opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
+                return NULL;
+            }
+        }
     }
 
     nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
lib/nvme/nvme_pcie.c

@@ -198,11 +198,15 @@ struct nvme_pcie_qpair {
 
     uint64_t cmd_bus_addr;
     uint64_t cpl_bus_addr;
+
+    struct spdk_nvme_cmd *sq_vaddr;
+    struct spdk_nvme_cpl *cq_vaddr;
 };
 
 static int nvme_pcie_ctrlr_attach(struct spdk_nvme_probe_ctx *probe_ctx,
                                   struct spdk_pci_addr *pci_addr);
-static int nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair);
+static int nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
+                                     const struct spdk_nvme_io_qpair_opts *opts);
 static int nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair);
 
 __thread struct nvme_pcie_ctrlr *g_thread_mmio_ctrlr = NULL;
@@ -710,7 +714,7 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
         return rc;
     }
 
-    return nvme_pcie_qpair_construct(ctrlr->adminq);
+    return nvme_pcie_qpair_construct(ctrlr->adminq, NULL);
 }
 
 /* This function must only be called while holding g_spdk_nvme_driver->lock */
@@ -983,7 +987,8 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 }
 
 static int
-nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
+nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
+                          const struct spdk_nvme_io_qpair_opts *opts)
 {
     struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
@@ -995,6 +1000,15 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
     uint16_t num_trackers;
     size_t page_align = VALUE_2MB;
     uint32_t flags = SPDK_MALLOC_DMA;
+    uint64_t sq_paddr = 0;
+    uint64_t cq_paddr = 0;
+
+    if (opts) {
+        pqpair->sq_vaddr = opts->sq.vaddr;
+        pqpair->cq_vaddr = opts->cq.vaddr;
+        sq_paddr = opts->sq.paddr;
+        cq_paddr = opts->cq.paddr;
+    }
 
     /*
      * Limit the maximum number of completions to return per call to prevent wraparound,
@@ -1027,39 +1041,55 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
         }
     }
 
-    /* To ensure physical address contiguity we make each ring occupy
-     * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
-     */
     if (pqpair->sq_in_cmb == false) {
-        pqpair->cmd = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
-                                   page_align, NULL,
-                                   SPDK_ENV_SOCKET_ID_ANY, flags);
-        if (pqpair->cmd == NULL) {
-            SPDK_ERRLOG("alloc qpair_cmd failed\n");
-            return -ENOMEM;
-        }
-
-        pqpair->cmd_bus_addr = spdk_vtophys(pqpair->cmd, NULL);
-        if (pqpair->cmd_bus_addr == SPDK_VTOPHYS_ERROR) {
-            SPDK_ERRLOG("spdk_vtophys(pqpair->cmd) failed\n");
-            return -EFAULT;
+        if (pqpair->sq_vaddr) {
+            pqpair->cmd = pqpair->sq_vaddr;
+        } else {
+            /* To ensure physical address contiguity we make each ring occupy
+             * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
+             */
+            pqpair->cmd = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
+                                       page_align, NULL,
+                                       SPDK_ENV_SOCKET_ID_ANY, flags);
+            if (pqpair->cmd == NULL) {
+                SPDK_ERRLOG("alloc qpair_cmd failed\n");
+                return -ENOMEM;
+            }
+        }
+        if (sq_paddr) {
+            assert(pqpair->sq_vaddr != NULL);
+            pqpair->cmd_bus_addr = sq_paddr;
+        } else {
+            pqpair->cmd_bus_addr = spdk_vtophys(pqpair->cmd, NULL);
+            if (pqpair->cmd_bus_addr == SPDK_VTOPHYS_ERROR) {
+                SPDK_ERRLOG("spdk_vtophys(pqpair->cmd) failed\n");
+                return -EFAULT;
+            }
         }
     }
 
-    pqpair->cpl = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
-                               page_align, NULL,
-                               SPDK_ENV_SOCKET_ID_ANY, flags);
-    if (pqpair->cpl == NULL) {
-        SPDK_ERRLOG("alloc qpair_cpl failed\n");
-        return -ENOMEM;
-    }
-
-    pqpair->cpl_bus_addr = spdk_vtophys(pqpair->cpl, NULL);
-    if (pqpair->cpl_bus_addr == SPDK_VTOPHYS_ERROR) {
-        SPDK_ERRLOG("spdk_vtophys(pqpair->cpl) failed\n");
-        return -EFAULT;
+    if (pqpair->cq_vaddr) {
+        pqpair->cpl = pqpair->cq_vaddr;
+    } else {
+        pqpair->cpl = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
+                                   page_align, NULL,
+                                   SPDK_ENV_SOCKET_ID_ANY, flags);
+        if (pqpair->cpl == NULL) {
+            SPDK_ERRLOG("alloc qpair_cpl failed\n");
+            return -ENOMEM;
+        }
+    }
+    if (cq_paddr) {
+        assert(pqpair->cq_vaddr != NULL);
+        pqpair->cpl_bus_addr = cq_paddr;
+    } else {
+        pqpair->cpl_bus_addr = spdk_vtophys(pqpair->cpl, NULL);
+        if (pqpair->cpl_bus_addr == SPDK_VTOPHYS_ERROR) {
+            SPDK_ERRLOG("spdk_vtophys(pqpair->cpl) failed\n");
+            return -EFAULT;
+        }
     }
 
     doorbell_base = &pctrlr->regs->doorbell[0].sq_tdbl;
     pqpair->sq_tdbl = doorbell_base + (2 * qpair->id + 0) * pctrlr->doorbell_stride_u32;
     pqpair->cq_hdbl = doorbell_base + (2 * qpair->id + 1) * pctrlr->doorbell_stride_u32;
@@ -1392,10 +1422,16 @@ nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
     if (nvme_qpair_is_admin_queue(qpair)) {
         nvme_pcie_admin_qpair_destroy(qpair);
     }
-    if (pqpair->cmd && !pqpair->sq_in_cmb) {
+    /*
+     * We check sq_vaddr and cq_vaddr to see if the user specified the memory
+     * buffers when creating the I/O queue.
+     * If the user specified them, we cannot free that memory.
+     * Nor do we free it if it's in the CMB.
+     */
+    if (!pqpair->sq_vaddr && pqpair->cmd && !pqpair->sq_in_cmb) {
         spdk_free(pqpair->cmd);
     }
-    if (pqpair->cpl) {
+    if (!pqpair->cq_vaddr && pqpair->cpl) {
         spdk_free(pqpair->cpl);
     }
     if (pqpair->tr) {
@@ -1593,7 +1629,8 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
         return NULL;
     }
 
-    rc = nvme_pcie_qpair_construct(qpair);
+    rc = nvme_pcie_qpair_construct(qpair, opts);
 
     if (rc != 0) {
         nvme_pcie_qpair_destroy(qpair);
         return NULL;