diff --git a/include/spdk/nvme.h b/include/spdk/nvme.h
index 9153056d0..c816aa701 100644
--- a/include/spdk/nvme.h
+++ b/include/spdk/nvme.h
@@ -1911,22 +1911,27 @@ int spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload
 volatile struct spdk_nvme_registers *spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr);
 
 /**
- * Map the controller memory buffer for use in I/O operations.
+ * Reserve the controller memory buffer for data transfer use.
  *
- * This function maps registered memory which belongs to the Controller
- * Memory Buffer (CMB) of the specified NVMe controller so that it is visible
- * from the CPU. Note that the CMB has to support the WDS and RDS capabilities
- * for the mapping to be successful. Also, due to vtophys contraints the CMB must
- * be at least 4MiB in size.
+ * This function reserves the full size of the controller memory buffer
+ * for use in data transfers. If submission queues or completion queues are
+ * already placed in the controller memory buffer, this call will fail.
  *
- * This cannot be used in conjunction with placing submission queues in the
- * controller memory buffer.
+ * \param ctrlr Controller from which to allocate memory buffer
  *
- * \param ctrlr Controller from which to allocate memory buffer.
- * \param size Size of buffer that was mapped. Return value.
+ * \return The size of the controller memory buffer on success. Negated errno
+ * on failure.
+ */
+int spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
+
+/**
+ * Map a previously reserved controller memory buffer so that its data is
+ * visible from the CPU. This operation is not always possible.
  *
- * \return Pointer to controller memory buffer, or NULL if mapping
- * was not possible.
+ * \param ctrlr Controller that contains the memory buffer
+ * \param size Size of buffer that was mapped.
+ *
+ * \return Pointer to controller memory buffer, or NULL on failure.
  */
 void *spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
 
@@ -3080,6 +3085,8 @@ struct spdk_nvme_transport_ops {
 
 	uint16_t (*ctrlr_get_max_sges)(struct spdk_nvme_ctrlr *ctrlr);
 
+	int (*ctrlr_reserve_cmb)(struct spdk_nvme_ctrlr *ctrlr);
+
 	void *(*ctrlr_map_cmb)(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
 
 	int (*ctrlr_unmap_cmb)(struct spdk_nvme_ctrlr *ctrlr);
diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c
index 5d5abdd2c..a8060ae9d 100644
--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -3363,15 +3363,36 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
 	return spdk_nvme_ctrlr_reset(ctrlr);
 }
 
+int
+spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+	int rc, size;
+	union spdk_nvme_cmbsz_register cmbsz;
+
+	cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
+
+	if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
+		return -ENOTSUP;
+	}
+
+	size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
+
+	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+	rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
+	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+
+	if (rc < 0) {
+		return rc;
+	}
+
+	return size;
+}
+
 void *
 spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
 {
 	void *buf;
 
-	if (size == 0) {
-		return NULL;
-	}
-
 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 	buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index 19199b461..1d53c6f85 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -1162,6 +1162,7 @@ uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 		uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts);
+int nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr);
 void *nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size);
 int nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
 int nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
diff --git a/lib/nvme/nvme_pcie.c b/lib/nvme/nvme_pcie.c
index f02a2ad7c..e35b83a3b 100644
--- a/lib/nvme/nvme_pcie.c
+++ b/lib/nvme/nvme_pcie.c
@@ -553,6 +553,24 @@ nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
 	return rc;
 }
 
+static int
+nvme_pcie_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
+
+	if (pctrlr->cmb.bar_va == NULL) {
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "CMB not available\n");
+		return -ENOTSUP;
+	}
+
+	if (ctrlr->opts.use_cmb_sqs) {
+		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
 static void *
 nvme_pcie_ctrlr_map_io_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
 {
@@ -2433,6 +2451,7 @@ const struct spdk_nvme_transport_ops pcie_ops = {
 
 	.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
 	.ctrlr_get_max_sges = nvme_pcie_ctrlr_get_max_sges,
 
+	.ctrlr_reserve_cmb = nvme_pcie_ctrlr_reserve_cmb,
 	.ctrlr_map_cmb = nvme_pcie_ctrlr_map_io_cmb,
 	.ctrlr_unmap_cmb = nvme_pcie_ctrlr_unmap_io_cmb,
diff --git a/lib/nvme/nvme_transport.c b/lib/nvme/nvme_transport.c
index 0e090869c..9b1a0821b 100644
--- a/lib/nvme/nvme_transport.c
+++ b/lib/nvme/nvme_transport.c
@@ -215,6 +215,19 @@ nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
 	return transport->ops.ctrlr_get_max_sges(ctrlr);
 }
 
+int
+nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_reserve_cmb != NULL) {
+		return transport->ops.ctrlr_reserve_cmb(ctrlr);
+	}
+
+	return -ENOTSUP;
+}
+
 void *
 nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
 {