nvme: add a quirk for QEMU emulated NVMe SSD

When the QEMU NVMe SSD is started with the "cmb_size_mb=XX" parameter,
the controller memory buffer (CMB) feature is enabled in the guest and
the SPDK NVMe driver running in the guest allocates the submission
queue in the CMB by default. The driver uses memset and SSE
instructions when copying an NVMe command into a submission queue
entry or zeroing the whole submission queue, and the memset
implementation will use AVX2 instructions if the CPU supports them.
However, QEMU limits the maximum access width to the PCI BAR space to
8 Bytes, so SPDK reports an illegal instruction in the guest.
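As an illustration only (the helper below is not part of this change;
its name and the volatile qualifier are ours), a copy routine that
respects the 8 Byte limit could look like this, moving one 64-byte
submission queue entry with plain 8-byte stores so that no SSE/AVX2
instruction ever touches the BAR:

#include <stddef.h>
#include <stdint.h>

/* Copy one 64-byte NVMe submission queue entry into CMB-backed memory
 * using only 8-byte stores, never SSE/AVX2 loads or stores.
 */
static void
copy_sqe_8byte(volatile uint64_t *dst, const uint64_t *src)
{
	size_t i;

	for (i = 0; i < 64 / sizeof(uint64_t); i++) {
		dst[i] = src[i];	/* each store is exactly 8 bytes wide */
	}
}

The patch itself takes an even simpler route for the quirked path: a
plain struct assignment of the command, as shown in the nvme_pcie.c
hunk below.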

Here we add a quirk for the QEMU emulated NVMe.

Fixes issue #1362.

Change-Id: Ib5e2e4198d39ce7f8455175f7db283db0b01eebf
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2196
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

lib/nvme/nvme_internal.h

@@ -138,6 +138,13 @@ extern pid_t g_spdk_nvme_pid;
  */
 #define NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE 0x800
 
+/**
+ * The maximum access width to PCI memory space is 8 Bytes; don't use AVX2 or
+ * SSE instructions to optimize memory accesses (memcpy or memset) larger than
+ * 8 Bytes.
+ */
+#define NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH 0x1000
+
 #define NVME_MAX_ASYNC_EVENTS (8)
 
 #define NVME_MAX_ADMIN_TIMEOUT_IN_SECS (30)
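Background on how such a flag is consumed (a minimal sketch; ctrlr is
the per-controller handle already used by the driver): quirks are
collected into a per-controller bitmask, so the new bit costs one
bitwise test at runtime:

	if (ctrlr->quirks & NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH) {
		/* avoid wide (SSE/AVX2) accesses to the controller memory buffer */
	}

The nvme_pcie.c hunk below gates the submission queue copy on exactly
this test, combined with the sq_in_cmb flag.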

lib/nvme/nvme_pcie.c

@@ -973,6 +973,7 @@ static int
 nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
+	uint32_t i;
 
 	/* all head/tail vals are set to 0 */
 	pqpair->last_sq_tail = pqpair->sq_tail = pqpair->sq_head = pqpair->cq_head = 0;
@@ -985,11 +986,9 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 	 * rolls over.
 	 */
 	pqpair->flags.phase = 1;
-
-	memset(pqpair->cmd, 0,
-	       pqpair->num_entries * sizeof(struct spdk_nvme_cmd));
-	memset(pqpair->cpl, 0,
-	       pqpair->num_entries * sizeof(struct spdk_nvme_cpl));
+	for (i = 0; i < pqpair->num_entries; i++) {
+		pqpair->cpl[i].status.p = 0;
+	}
 
 	return 0;
 }
@@ -1326,6 +1325,7 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
 {
 	struct nvme_request *req;
 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
+	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
 
 	req = tr->req;
 	assert(req != NULL);
@@ -1335,8 +1335,15 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
 		qpair->first_fused_submitted = 1;
 	}
 
-	/* Copy the command from the tracker to the submission queue. */
-	nvme_pcie_copy_command(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
+	/* Don't use wide instructions to copy the NVMe command; the QEMU virtual
+	 * NVMe controller limits the maximum access width to 8 Bytes per access.
+	 */
+	if (spdk_unlikely((ctrlr->quirks & NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH) && pqpair->sq_in_cmb)) {
+		pqpair->cmd[pqpair->sq_tail] = req->cmd;
+	} else {
+		/* Copy the command from the tracker to the submission queue. */
+		nvme_pcie_copy_command(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
+	}
 
 	if (spdk_unlikely(++pqpair->sq_tail == pqpair->num_entries)) {
 		pqpair->sq_tail = 0;

lib/nvme/nvme_quirks.c

@@ -83,7 +83,8 @@ static const struct nvme_quirk nvme_quirks[] = {
 	},
 	{	{SPDK_PCI_VID_INTEL, 0x5845, SPDK_PCI_ANY_ID, SPDK_PCI_ANY_ID},
 		NVME_QUIRK_IDENTIFY_CNS |
-		NVME_INTEL_QUIRK_NO_LOG_PAGES
+		NVME_INTEL_QUIRK_NO_LOG_PAGES |
+		NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH
 	},
 	{	{SPDK_PCI_VID_CNEXLABS, 0x1f1f, SPDK_PCI_ANY_ID, SPDK_PCI_ANY_ID},
 		NVME_QUIRK_IDENTIFY_CNS |