nvme: Change mapping semantics of controller memory buffer

Instead of creating an allocator where the driver manages the space,
now, since using the CMB for queues and data has already been
disallowed, just create functions to map and unmap the entire CMB.
The user can manage the space.

Change-Id: I023994deda3b517e14d2ba464c7375bf22b58456
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/785
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
This commit is contained in:
Ben Walker 2020-02-10 14:13:53 -07:00 committed by Tomasz Zawadzki
parent 3429f97a89
commit 265a8436f4
9 changed files with 53 additions and 59 deletions

View File

@@ -29,8 +29,8 @@ capabilities are given in the table below.
Key Functions | Description
------------------------------------------- | -----------
spdk_nvme_ctrlr_alloc_cmb_io_buffer() | @copybrief spdk_nvme_ctrlr_alloc_cmb_io_buffer()
spdk_nvme_ctrlr_free_cmb_io_buffer() | @copybrief spdk_nvme_ctrlr_free_cmb_io_buffer()
spdk_nvme_ctrlr_map_cmb() | @copybrief spdk_nvme_ctrlr_map_cmb()
spdk_nvme_ctrlr_unmap_cmb() | @copybrief spdk_nvme_ctrlr_unmap_cmb()
spdk_nvme_ctrlr_get_regs_cmbsz() | @copybrief spdk_nvme_ctrlr_get_regs_cmbsz()
# Determining device support {#p2p_support}

View File

@@ -109,6 +109,7 @@ cmb_copy(void)
{
int rc = 0, rw;
void *buf;
size_t sz;
/* Allocate QPs for the read and write controllers */
g_config.read.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_config.read.ctrlr, NULL, 0);
@@ -119,8 +120,8 @@ cmb_copy(void)
}
/* Allocate a buffer from our CMB */
buf = spdk_nvme_ctrlr_alloc_cmb_io_buffer(g_config.cmb.ctrlr, g_config.copy_size);
if (buf == NULL) {
buf = spdk_nvme_ctrlr_map_cmb(g_config.cmb.ctrlr, &sz);
if (buf == NULL || sz != g_config.copy_size) {
printf("ERROR: buffer allocation failed\n");
printf("Are you sure %s has a valid CMB?\n",
g_config.cmb.trid.traddr);
@@ -162,8 +163,7 @@ cmb_copy(void)
g_config.write.done = 0;
/* Free CMB buffer */
spdk_nvme_ctrlr_free_cmb_io_buffer(g_config.cmb.ctrlr, buf,
g_config.copy_size);
spdk_nvme_ctrlr_unmap_cmb(g_config.cmb.ctrlr);
/* Free the queues */
spdk_nvme_ctrlr_free_io_qpair(g_config.read.qpair);

View File

@@ -138,7 +138,7 @@ write_complete(void *arg, const struct spdk_nvme_cpl *completion)
* the data back from the NVMe namespace.
*/
if (sequence->using_cmb_io) {
spdk_nvme_ctrlr_free_cmb_io_buffer(ns_entry->ctrlr, sequence->buf, 0x1000);
spdk_nvme_ctrlr_unmap_cmb(ns_entry->ctrlr);
} else {
spdk_free(sequence->buf);
}
@@ -160,6 +160,7 @@ hello_world(void)
struct ns_entry *ns_entry;
struct hello_world_sequence sequence;
int rc;
size_t sz;
ns_entry = g_namespaces;
while (ns_entry != NULL) {
@@ -187,8 +188,8 @@ hello_world(void)
* I/O operations.
*/
sequence.using_cmb_io = 1;
sequence.buf = spdk_nvme_ctrlr_alloc_cmb_io_buffer(ns_entry->ctrlr, 0x1000);
if (sequence.buf == NULL) {
sequence.buf = spdk_nvme_ctrlr_map_cmb(ns_entry->ctrlr, &sz);
if (sequence.buf == NULL || sz < 0x1000) {
sequence.using_cmb_io = 0;
sequence.buf = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
}

View File

@@ -1911,33 +1911,31 @@ int spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload
volatile struct spdk_nvme_registers *spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr);
/**
* Allocate an I/O buffer from the controller memory buffer (Experimental).
* Map the controller memory buffer for use in I/O operations.
*
* This function allocates registered memory which belongs to the Controller
* Memory Buffer (CMB) of the specified NVMe controller. Note that the CMB has
* to support the WDS and RDS capabilities for the allocation to be successful.
* Also, due to vtophys contraints the CMB must be at least 4MiB in size. Free
* memory allocated with this function using spdk_nvme_ctrlr_free_cmb_io_buffer().
* This function maps registered memory which belongs to the Controller
* Memory Buffer (CMB) of the specified NVMe controller so that it is visible
* from the CPU. Note that the CMB has to support the WDS and RDS capabilities
* for the mapping to be successful. Also, due to vtophys contraints the CMB must
* be at least 4MiB in size.
*
* This cannot be used in conjunction with placing submission queues in the
* controller memory buffer.
*
* \param ctrlr Controller from which to allocate memory buffer.
* \param size Size of buffer to allocate in bytes.
* \param size Size of buffer that was mapped. Return value.
*
* \return Pointer to controller memory buffer allocation, or NULL if allocation
* \return Pointer to controller memory buffer, or NULL if mapping
* was not possible.
*/
void *spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size);
void *spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
/**
* Free a controller memory I/O buffer (Experimental).
* Free a controller memory I/O buffer.
*
* Note this function is currently a NOP which is one reason why this and
* spdk_nvme_ctrlr_alloc_cmb_io_buffer() are currently marked as experimental.
*
* \param ctrlr Controller from which the buffer was allocated.
* \param buf Buffer previously allocated by spdk_nvme_ctrlr_alloc_cmb_io_buffer().
* \param size Size of buf in bytes.
* \param ctrlr Controller from which to unmap the memory buffer.
*/
void spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size);
void spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
/**
* Get the transport ID for a given NVMe controller.
@@ -3082,9 +3080,9 @@ struct spdk_nvme_transport_ops {
uint16_t (*ctrlr_get_max_sges)(struct spdk_nvme_ctrlr *ctrlr);
void *(*ctrlr_alloc_cmb_io_buffer)(struct spdk_nvme_ctrlr *ctrlr, size_t size);
void *(*ctrlr_map_cmb)(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
int (*ctrlr_free_cmb_io_buffer)(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size);
int (*ctrlr_unmap_cmb)(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *(*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
const struct spdk_nvme_io_qpair_opts *opts);

View File

@@ -3364,7 +3364,7 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
}
void *
spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
void *buf;
@@ -3373,20 +3373,18 @@ spdk_nvme_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
}
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
buf = nvme_transport_ctrlr_alloc_cmb_io_buffer(ctrlr, size);
buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
return buf;
}
void
spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
if (buf && size) {
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
nvme_transport_ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
nvme_transport_ctrlr_unmap_cmb(ctrlr);
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}
bool

View File

@@ -1162,8 +1162,8 @@ uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
void *nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size);
int nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size);
void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
int nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair);
int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,

View File

@@ -582,11 +582,13 @@ nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
}
static void *
nvme_pcie_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
nvme_pcie_ctrlr_map_io_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
uint64_t offset;
*size = 0;
if (pctrlr->cmb.bar_va == NULL) {
SPDK_DEBUGLOG(SPDK_LOG_NVME, "CMB not available\n");
return NULL;
@@ -604,25 +606,20 @@ nvme_pcie_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
offset = (pctrlr->cmb.current_offset + (3)) & ~(3);
/* CMB may only consume part of the BAR, calculate accordingly */
if (offset + size > pctrlr->cmb.end) {
SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
if (offset >= pctrlr->cmb.end) {
return NULL;
}
pctrlr->cmb.current_offset = offset + size;
*size = pctrlr->cmb.end - offset;
pctrlr->cmb.current_offset = pctrlr->cmb.end;
return pctrlr->cmb.bar_va + offset;
}
static int
nvme_pcie_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
nvme_pcie_ctrlr_unmap_io_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
/*
* Do nothing for now.
* TODO: Track free space so buffers may be reused.
*/
SPDK_ERRLOG("no deallocation for CMB buffers yet!\n");
return 0;
}
@@ -2419,8 +2416,8 @@ const struct spdk_nvme_transport_ops pcie_ops = {
.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
.ctrlr_get_max_sges = nvme_pcie_ctrlr_get_max_sges,
.ctrlr_alloc_cmb_io_buffer = nvme_pcie_ctrlr_alloc_cmb_io_buffer,
.ctrlr_free_cmb_io_buffer = nvme_pcie_ctrlr_free_cmb_io_buffer,
.ctrlr_map_cmb = nvme_pcie_ctrlr_map_io_cmb,
.ctrlr_unmap_cmb = nvme_pcie_ctrlr_unmap_io_cmb,
.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,

View File

@@ -216,26 +216,26 @@ nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
}
void *
nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
assert(transport != NULL);
if (transport->ops.ctrlr_alloc_cmb_io_buffer != NULL) {
return transport->ops.ctrlr_alloc_cmb_io_buffer(ctrlr, size);
if (transport->ops.ctrlr_map_cmb != NULL) {
return transport->ops.ctrlr_map_cmb(ctrlr, size);
}
return NULL;
}
int
nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
assert(transport != NULL);
if (transport->ops.ctrlr_free_cmb_io_buffer != NULL) {
return transport->ops.ctrlr_free_cmb_io_buffer(ctrlr, buf, size);
if (transport->ops.ctrlr_unmap_cmb != NULL) {
return transport->ops.ctrlr_unmap_cmb(ctrlr);
}
return 0;

View File

@@ -137,13 +137,13 @@ nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
}
void *
nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
return NULL;
}
int
nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}