nvme/pcie: Don't allow both sq and data in CMB at same time

The NVMe specification allows the controller memory buffer to hold both
submission queues and data buffers at the same time, but refusing to use
the two features together will make some upcoming patches much simpler.

Change-Id: I1abb7d9c02c105a50b1603bfab8eec2025289123
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/782
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Ben Walker 2020-02-10 14:00:04 -07:00 committed by Tomasz Zawadzki
parent c96579410e
commit c29cca539d
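For context on what this restriction means to a consumer of the driver, here is a minimal usage sketch. It assumes the public spdk_nvme_ctrlr_alloc_cmb_io_buffer() wrapper that fronts the private allocator touched in this diff, and a controller handle ctrlr attached with the options shown; none of this code is part of the patch itself.

	struct spdk_nvme_ctrlr_opts opts;
	void *buf;

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	opts.use_cmb_sqs = true;	/* place submission queues in the CMB */

	/* ... attach a controller with these opts, e.g. via spdk_nvme_connect() ... */

	/* After this patch the two CMB uses are mutually exclusive, so the
	 * allocation below fails and the driver logs
	 * "CMB is already in use for submission queues." */
	buf = spdk_nvme_ctrlr_alloc_cmb_io_buffer(ctrlr, 4096);
	/* buf is NULL here */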


@@ -580,28 +580,6 @@ nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
 	return rc;
 }
 
-static int
-nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
-			  uint64_t *offset)
-{
-	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
-	uint64_t round_offset;
-
-	round_offset = pctrlr->cmb.current_offset;
-	round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1);
-
-	/* CMB may only consume part of the BAR, calculate accordingly */
-	if (round_offset + length > pctrlr->cmb.end) {
-		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
-		return -1;
-	}
-
-	*offset = round_offset;
-	pctrlr->cmb.current_offset = round_offset + length;
-
-	return 0;
-}
-
 static void *
 nvme_pcie_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
 {
@@ -618,11 +596,21 @@ nvme_pcie_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
 		return NULL;
 	}
 
-	if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, size, 4, &offset) != 0) {
-		SPDK_DEBUGLOG(SPDK_LOG_NVME, "%zu-byte CMB allocation failed\n", size);
+	if (ctrlr->opts.use_cmb_sqs) {
+		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
 		return NULL;
 	}
 
+	offset = (pctrlr->cmb.current_offset + (3)) & ~(3);
+
+	/* CMB may only consume part of the BAR, calculate accordingly */
+	if (offset + size > pctrlr->cmb.end) {
+		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
+		return NULL;
+	}
+
+	pctrlr->cmb.current_offset = offset + size;
+
 	return pctrlr->cmb.bar_va + offset;
 }
 
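The inlined data path above rounds the running CMB offset up to a 4-byte boundary with (offset + 3) & ~(3). Note that ~(3) is the int value -4, which sign-extends to 0xFFFFFFFFFFFFFFFC when widened to uint64_t, so the mask clears the low two bits as intended. A self-contained illustration of the round-up (round_up4 is an illustrative name, not an SPDK symbol):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the diff above: round x up to the next multiple of 4. */
static uint64_t
round_up4(uint64_t x)
{
	return (x + 3) & ~(3);
}

int
main(void)
{
	/* Prints "8 8 12": 5 rounds up to 8, 8 is already aligned, 9 goes to 12. */
	printf("%" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
	       round_up4(5), round_up4(8), round_up4(9));
	return 0;
}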
@@ -979,6 +967,28 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 	return 0;
 }
 
+static int
+nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t length, uint64_t aligned,
+			  uint64_t *offset)
+{
+	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
+	uint64_t round_offset;
+
+	round_offset = pctrlr->cmb.current_offset;
+	round_offset = (round_offset + (aligned - 1)) & ~(aligned - 1);
+
+	/* CMB may only consume part of the BAR, calculate accordingly */
+	if (round_offset + length > pctrlr->cmb.end) {
+		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
+		return -1;
+	}
+
+	*offset = round_offset;
+	pctrlr->cmb.current_offset = round_offset + length;
+
+	return 0;
+}
+
 static int
 nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
 			  const struct spdk_nvme_io_qpair_opts *opts)
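The relocated nvme_pcie_ctrlr_alloc_cmb() is a simple bump allocator over the CMB BAR window: round the running offset up to the caller's power-of-two alignment, check against the end of the region, and advance. A host-testable sketch of the same logic, where cmb_region is a hypothetical stand-in for the bookkeeping fields in struct nvme_pcie_ctrlr:

#include <stdint.h>

/* Hypothetical stand-in for the cmb bookkeeping in struct nvme_pcie_ctrlr. */
struct cmb_region {
	uint64_t current_offset;	/* next unused byte within the CMB */
	uint64_t end;			/* one past the last usable byte */
};

/* Bump-allocate length bytes at the given power-of-two alignment, mirroring
 * nvme_pcie_ctrlr_alloc_cmb() above: returns 0 and the chosen offset on
 * success, -1 if the request would run past the end of the CMB. */
static int
cmb_alloc(struct cmb_region *cmb, uint64_t length, uint64_t aligned, uint64_t *offset)
{
	uint64_t round_offset = (cmb->current_offset + (aligned - 1)) & ~(aligned - 1);

	if (round_offset + length > cmb->end) {
		return -1;
	}

	*offset = round_offset;
	cmb->current_offset = round_offset + length;
	return 0;
}

Nothing here records individual allocations, so there is no matching free and the offset only ever moves forward. Keeping a single user per CMB, either submission queues or data buffers as this commit enforces, is presumably part of what keeps that one-way bookkeeping simple for the upcoming patches.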