nvme: move PCIe-specific definitions to nvme_pcie

Add a transport function to get the max data transfer size to break the
dependency on NVME_MAX_XFER_SIZE.

Change-Id: I846d12878bdd8b80903ca1b1b49b3bb8e2be98bb
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-10-19 16:14:09 -07:00
parent b33e29efd3
commit bee15d8be0
4 changed files with 79 additions and 70 deletions

View File

@ -493,10 +493,12 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
* Use MDTS to ensure our default max_xfer_size doesn't exceed what the
* controller supports.
*/
ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
ctrlr->max_xfer_size = ctrlr->transport->ctrlr_get_max_xfer_size(ctrlr);
SPDK_TRACELOG(SPDK_TRACE_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
if (ctrlr->cdata.mdts > 0) {
ctrlr->max_xfer_size = nvme_min(ctrlr->max_xfer_size,
ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
SPDK_TRACELOG(SPDK_TRACE_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
}
return 0;

View File

@ -70,47 +70,6 @@
*/
#define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
#define NVME_MAX_PRP_LIST_ENTRIES (506)
/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 * embedded in the command (prp1), and the rest of the PRP entries
 * will be in a list pointed to by the command (prp2). This means
 * that real max number of PRP entries we support is 506+1, which
 * results in a max xfer size of 506*PAGE_SIZE.
 *
 * The replacement list is parenthesized so the macro expands safely
 * inside larger expressions (e.g. `x / NVME_MAX_XFER_SIZE` would
 * otherwise parse as `x / 506 * PAGE_SIZE`).
 */
#define NVME_MAX_XFER_SIZE (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
/* Number of trackers (outstanding commands) and queue entries for the admin qpair. */
#define NVME_ADMIN_TRACKERS (16)
#define NVME_ADMIN_ENTRIES (128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES (2)
#define NVME_MAX_ADMIN_ENTRIES (4096)
/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 * will allow outstanding on an I/O qpair at any time. The only advantage in
 * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of data.
 */
#define NVME_IO_ENTRIES (256)
#define NVME_IO_TRACKERS (128)
#define NVME_MIN_IO_TRACKERS (4)
#define NVME_MAX_IO_TRACKERS (1024)
/*
 * NVME_MAX_SGL_DESCRIPTORS defines the maximum number of descriptors in one SGL
 * segment.
 */
#define NVME_MAX_SGL_DESCRIPTORS (253)
/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CC.MQES
 * for each controller.
 */
/* Number of Asynchronous Event Request commands kept outstanding per controller. */
#define NVME_MAX_ASYNC_EVENTS (8)
/* NOTE(review): presumably the minimum command timeout in seconds — verify against callers. */
#define NVME_MIN_TIMEOUT_PERIOD (5)
@ -263,6 +222,8 @@ struct spdk_nvme_transport {
int (*ctrlr_get_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
uint32_t (*ctrlr_get_max_xfer_size)(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *(*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr,
uint16_t qid, enum spdk_nvme_qprio qprio);
int (*ctrlr_delete_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
@ -292,34 +253,6 @@ struct nvme_async_event_request {
struct spdk_nvme_cpl cpl;
};
/*
 * Per-command DMA tracking state: links an in-flight nvme_request to its
 * command identifier and to the physical-memory descriptors (PRP list or
 * SGL) the controller will read. Layout is deliberately exact (see the
 * static asserts below), so fields must not be reordered or resized.
 */
struct nvme_tracker {
LIST_ENTRY(nvme_tracker) list;
struct nvme_request *req;
/* Command identifier placed in the submission queue entry. */
uint16_t cid;
uint16_t rsvd1: 15;
uint16_t active: 1;
uint32_t rsvd2;
/* Bus address of u.prp/u.sgl, for the command's prp2/SGL pointer. */
uint64_t prp_sgl_bus_addr;
union {
uint64_t prp[NVME_MAX_PRP_LIST_ENTRIES];
struct spdk_nvme_sgl_descriptor sgl[NVME_MAX_SGL_DESCRIPTORS];
} u;
uint64_t rsvd3;
};
/*
 * struct nvme_tracker must be exactly 4K so that the prp[] array does not cross a page boundary
 * and so that there is no padding required to meet alignment requirements.
 */
SPDK_STATIC_ASSERT(sizeof(struct nvme_tracker) == 4096, "nvme_tracker is not 4K");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned");
struct spdk_nvme_qpair {
const struct spdk_nvme_transport *transport;

View File

@ -37,6 +37,38 @@
#include "nvme_internal.h"
/* Queue entry counts and outstanding-command limits for the admin qpair. */
#define NVME_ADMIN_ENTRIES (128)
#define NVME_ADMIN_TRACKERS (16)
/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 * will allow outstanding on an I/O qpair at any time. The only advantage in
 * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of data.
 */
#define NVME_IO_ENTRIES (256)
#define NVME_IO_TRACKERS (128)
/*
 * NVME_MAX_SGL_DESCRIPTORS defines the maximum number of descriptors in one SGL
 * segment.
 */
#define NVME_MAX_SGL_DESCRIPTORS (253)
#define NVME_MAX_PRP_LIST_ENTRIES (506)
/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 * embedded in the command (prp1), and the rest of the PRP entries
 * will be in a list pointed to by the command (prp2). This means
 * that real max number of PRP entries we support is 506+1, which
 * results in a max xfer size of 506*PAGE_SIZE.
 *
 * The replacement list is parenthesized so the macro expands safely
 * inside larger expressions (e.g. `x / NVME_MAX_XFER_SIZE` would
 * otherwise parse as `x / 506 * PAGE_SIZE`).
 */
#define NVME_MAX_XFER_SIZE (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
/* PCIe transport extensions for spdk_nvme_ctrlr */
struct nvme_pcie_ctrlr {
struct spdk_nvme_ctrlr ctrlr;
@ -60,6 +92,33 @@ struct nvme_pcie_ctrlr {
uint32_t doorbell_stride_u32;
};
/*
 * Per-command DMA tracking state for the PCIe transport: links an in-flight
 * nvme_request to its command identifier and to the physical-memory
 * descriptors (PRP list or SGL) the controller will read. Layout is
 * deliberately exact (see the static asserts below), so fields must not be
 * reordered or resized.
 */
struct nvme_tracker {
LIST_ENTRY(nvme_tracker) list;
struct nvme_request *req;
/* Command identifier placed in the submission queue entry. */
uint16_t cid;
uint16_t rsvd1: 15;
uint16_t active: 1;
uint32_t rsvd2;
/* Bus address of u.prp/u.sgl, for the command's prp2/SGL pointer. */
uint64_t prp_sgl_bus_addr;
union {
uint64_t prp[NVME_MAX_PRP_LIST_ENTRIES];
struct spdk_nvme_sgl_descriptor sgl[NVME_MAX_SGL_DESCRIPTORS];
} u;
uint64_t rsvd3;
};
/*
 * struct nvme_tracker must be exactly 4K so that the prp[] array does not cross a page boundary
 * and so that there is no padding required to meet alignment requirements.
 */
SPDK_STATIC_ASSERT(sizeof(struct nvme_tracker) == 4096, "nvme_tracker is not 4K");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned");
/* PCIe transport extensions for spdk_nvme_qpair */
struct nvme_pcie_qpair {
/* Submission queue tail doorbell */
@ -214,6 +273,12 @@ nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_
&cmbsz->raw);
}
/*
 * Transport callback: report the largest single-command data transfer the
 * PCIe transport supports (bounded by the PRP list size via
 * NVME_MAX_XFER_SIZE).
 */
static uint32_t
nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	(void)ctrlr; /* limit is fixed for PCIe; the controller handle is unused */
	return NVME_MAX_XFER_SIZE;
}
static void
nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
{
@ -1454,6 +1519,8 @@ const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
.ctrlr_get_reg_4 = nvme_pcie_ctrlr_get_reg_4,
.ctrlr_get_reg_8 = nvme_pcie_ctrlr_get_reg_8,
.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,
.ctrlr_reinit_io_qpair = nvme_pcie_ctrlr_reinit_io_qpair,

View File

@ -120,6 +120,11 @@ ut_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *val
return 0;
}
/* Unit-test transport stub: report an effectively unlimited max transfer size. */
static uint32_t
ut_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	(void)ctrlr; /* stub — controller handle is unused */
	return UINT32_MAX;
}
static struct spdk_nvme_qpair *
ut_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio)
@ -162,6 +167,8 @@ static const struct spdk_nvme_transport nvme_ctrlr_ut_transport = {
.ctrlr_get_reg_4 = ut_ctrlr_get_reg_4,
.ctrlr_get_reg_8 = ut_ctrlr_get_reg_8,
.ctrlr_get_max_xfer_size = ut_ctrlr_get_max_xfer_size,
.ctrlr_create_io_qpair = ut_ctrlr_create_io_qpair,
.ctrlr_delete_io_qpair = ut_ctrlr_delete_io_qpair,