nvme: Allow users to reserve the CMB for data without mapping it

Separate reserving the CMB for data and mapping it into the CPU's
address space into two distinct functions. A CMB may not be visible
from the CPU, but still be present and have data transferred to it by
some other DMA engine. Generalize the API to handle that case.
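For context, a minimal usage sketch of the reworked API (not part of this commit). The helper name is hypothetical; it assumes an already-attached ctrlr and the public spdk_nvme_ctrlr_unmap_cmb() wrapper, whose transport-level counterpart is the only part shown in this diff:

#include <stdio.h>
#include "spdk/nvme.h"

/* Hypothetical helper: reserve the CMB for data transfers; mapping it into
 * the CPU address space is a separate, optional step that may legitimately
 * fail, e.g. when the CMB is only reachable by another DMA engine. */
static int
use_cmb_for_data(struct spdk_nvme_ctrlr *ctrlr)
{
	int rc;
	size_t size;
	void *buf;

	rc = spdk_nvme_ctrlr_reserve_cmb(ctrlr);
	if (rc < 0) {
		/* No CMB, CMB lacks RDS/WDS, or CMB already holds SQs. */
		return rc;
	}
	printf("Reserved %d bytes of CMB\n", rc);

	buf = spdk_nvme_ctrlr_map_cmb(ctrlr, &size);
	if (buf == NULL) {
		/* Reserved but not CPU-visible; other DMA engines may still use it. */
		return 0;
	}
	printf("CMB mapped at %p, %zu bytes\n", buf, size);

	/* ... use buf as a data buffer for I/O ... */

	spdk_nvme_ctrlr_unmap_cmb(ctrlr);
	return 0;
}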

Change-Id: Ifcd282af0db734fe4a6ef2283ae8e8933d017809
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/787
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Commit 7b28450b3f (parent 6a7aa72edc), authored by Ben Walker on 2020-02-10 09:27:24 -07:00, committed by Tomasz Zawadzki.
5 changed files with 77 additions and 16 deletions

View File

@@ -1911,22 +1911,27 @@ int spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload
volatile struct spdk_nvme_registers *spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr);
/**
* Map the controller memory buffer for use in I/O operations.
* Reserve the controller memory buffer for data transfer use.
*
* This function maps registered memory which belongs to the Controller
* Memory Buffer (CMB) of the specified NVMe controller so that it is visible
* from the CPU. Note that the CMB has to support the WDS and RDS capabilities
* for the mapping to be successful. Also, due to vtophys constraints the CMB must
* be at least 4MiB in size.
* This function reserves the full size of the controller memory buffer
* for use in data transfers. If submission queues or completion queues are
* already placed in the controller memory buffer, this call will fail.
*
* This cannot be used in conjunction with placing submission queues in the
* controller memory buffer.
* \param ctrlr Controller from which to allocate memory buffer
*
* \param ctrlr Controller from which to allocate memory buffer.
* \param size Size of buffer that was mapped. Return value.
* \return The size of the controller memory buffer on success. Negated errno
* on failure.
*/
int spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
/**
* Map a previously reserved controller memory buffer so that its data is
* visible from the CPU. This operation is not always possible.
*
* \return Pointer to controller memory buffer, or NULL if mapping
* was not possible.
* \param ctrlr Controller that contains the memory buffer
* \param size Size of buffer that was mapped.
*
* \return Pointer to controller memory buffer, or NULL on failure.
*/
void *spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
@@ -3080,6 +3085,8 @@ struct spdk_nvme_transport_ops {
uint16_t (*ctrlr_get_max_sges)(struct spdk_nvme_ctrlr *ctrlr);
int (*ctrlr_reserve_cmb)(struct spdk_nvme_ctrlr *ctrlr);
void *(*ctrlr_map_cmb)(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
int (*ctrlr_unmap_cmb)(struct spdk_nvme_ctrlr *ctrlr);

View File

@@ -3363,15 +3363,36 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
return spdk_nvme_ctrlr_reset(ctrlr);
}
int
spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
int rc, size;
union spdk_nvme_cmbsz_register cmbsz;
cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
return -ENOTSUP;
}
size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
if (rc < 0) {
return rc;
}
return size;
}
void *
spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
void *buf;
if (size == NULL) {
return NULL;
}
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

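The size computed above follows the NVMe CMBSZ register encoding: SZ is a count of size units and SZU selects the unit as 4 KiB * 16^SZU, which is what the expression 0x1000 << (szu * 4) evaluates to. A standalone check of that arithmetic with illustrative register values (not taken from this commit):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative CMBSZ fields: SZU = 2 selects 1 MiB units, SZ = 64 units. */
	uint32_t szu = 2, sz = 64;
	uint64_t unit = 0x1000ULL << (szu * 4);   /* 4 KiB * 16^SZU */
	uint64_t size = (uint64_t)sz * unit;

	assert(unit == 1024 * 1024);              /* 1 MiB granularity */
	assert(size == 64ULL * 1024 * 1024);      /* 64 MiB CMB */
	return 0;
}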
View File

@@ -1162,6 +1162,7 @@ uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
int nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,

View File

@@ -553,6 +553,24 @@ nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
return rc;
}
static int
nvme_pcie_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
if (pctrlr->cmb.bar_va == NULL) {
SPDK_DEBUGLOG(SPDK_LOG_NVME, "CMB not available\n");
return -ENOTSUP;
}
if (ctrlr->opts.use_cmb_sqs) {
SPDK_ERRLOG("CMB is already in use for submission queues.\n");
return -ENOTSUP;
}
return 0;
}
static void *
nvme_pcie_ctrlr_map_io_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
@@ -2433,6 +2451,7 @@ const struct spdk_nvme_transport_ops pcie_ops = {
.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
.ctrlr_get_max_sges = nvme_pcie_ctrlr_get_max_sges,
.ctrlr_reserve_cmb = nvme_pcie_ctrlr_reserve_cmb,
.ctrlr_map_cmb = nvme_pcie_ctrlr_map_io_cmb,
.ctrlr_unmap_cmb = nvme_pcie_ctrlr_unmap_io_cmb,

View File

@@ -215,6 +215,19 @@ nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
return transport->ops.ctrlr_get_max_sges(ctrlr);
}
int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
assert(transport != NULL);
if (transport->ops.ctrlr_reserve_cmb != NULL) {
return transport->ops.ctrlr_reserve_cmb(ctrlr);
}
return -ENOTSUP;
}
void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{