nvme: add transport get_max_sges abstraction

For PCIe, this equals the number of SGL descriptors that fit
into the per-tracker memory.

For RDMA, this is set to 1 for now, since nvme_rdma.c does not
support multiple SGEs yet.  Once that support is added, this will
change to use MSDBD (Maximum SGL Data Block Descriptors) from the
controller's identify data instead.
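
For reference, every transport now implements the same entry point,
declared once via the per-transport macro in nvme_internal.h and
reached through nvme_transport_ctrlr_get_max_sges():

    uint16_t nvme_pcie_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
    uint16_t nvme_rdma_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);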

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I34a4c546b5ff46918a296a73ed8cbcc6c9879d5a

Reviewed-on: https://review.gerrithub.io/372358
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit 002660c4f0 (parent 75a1b39fcc)
Author: Jim Harris <james.r.harris@intel.com>
Date:   2017-08-02 12:03:06 -07:00

6 changed files with 34 additions and 0 deletions


@@ -1363,6 +1363,7 @@ nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
 	if (ctrlr->cdata.sgls.supported) {
 		ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
+		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
 	}
 
 	if (nvme_ctrlr_set_keep_alive_timeout(ctrlr) != 0) {
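
With the limit cached at startup, an I/O submission path can check
ctrlr->max_sges before building an SGL request.  A minimal sketch of
such a check (request_fits_in_one_sgl is a hypothetical helper, not
part of this commit):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: true if a scatter list of iovcnt elements can
     * be described by a single SGL submission, given the transport limit
     * cached in ctrlr->max_sges by nvme_ctrlr_start(). */
    static bool
    request_fits_in_one_sgl(uint16_t max_sges, int iovcnt)
    {
            return iovcnt > 0 && (uint16_t)iovcnt <= max_sges;
    }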


@@ -380,6 +380,8 @@ struct spdk_nvme_ctrlr {
 	bool is_failed;
 
+	uint16_t max_sges;
+
 	/** Controller support flags */
 	uint64_t flags;
@@ -595,6 +597,7 @@ void nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl);
 	int nvme_ ## name ## _ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value); \
 	uint32_t nvme_ ## name ## _ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr); \
 	uint32_t nvme_ ## name ## _ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr); \
+	uint16_t nvme_ ## name ## _ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr); \
 	struct spdk_nvme_qpair *nvme_ ## name ## _ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts); \
 	int nvme_ ## name ## _ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
 	int nvme_ ## name ## _ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
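
Because of the nvme_ ## name ## _ token pasting, the single declaration
added above yields one concrete prototype per transport.  Assuming the
enclosing macro is DECLARE_TRANSPORT (the name used in nvme_internal.h),
DECLARE_TRANSPORT(pcie) expands the new line to:

    uint16_t nvme_pcie_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);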


@@ -406,6 +406,12 @@ nvme_pcie_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
 	return NVME_IO_ENTRIES;
 }
 
+uint16_t
+nvme_pcie_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return NVME_MAX_SGL_DESCRIPTORS;
+}
+
 static void
 nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
 {
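
NVME_MAX_SGL_DESCRIPTORS is the "number of SGL descriptors that fit
into the per-tracker memory" from the commit message.  NVMe SGL
descriptors are 16 bytes each, so the count falls out of the space the
tracker sets aside for them; a back-of-the-envelope version (the
4096-byte area is an assumed size for illustration, not the actual
tracker layout):

    /* Each NVMe SGL descriptor is 16 bytes, so a hypothetical 4096-byte
     * per-tracker descriptor area holds 4096 / 16 = 256 descriptors.
     * nvme_pcie.c derives NVME_MAX_SGL_DESCRIPTORS from its real tracker
     * layout in the same spirit. */
    #define SGL_DESCRIPTOR_SIZE     16
    #define TRACKER_DESCRIPTOR_AREA 4096    /* assumption for illustration */
    #define MAX_SGL_DESCRIPTORS     (TRACKER_DESCRIPTOR_AREA / SGL_DESCRIPTOR_SIZE)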


@@ -1499,3 +1499,15 @@ nvme_rdma_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
 {
 	return NVME_HOST_MAX_ENTRIES_PER_QUEUE;
 }
+
+uint16_t
+nvme_rdma_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+	/*
+	 * We do not support >1 SGE in the initiator currently,
+	 * so we can only return 1 here.  Once that support is
+	 * added, this should return ctrlr->cdata.nvmf_specific.msdbd
+	 * instead.
+	 */
+	return 1;
+}
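
The comment above names the eventual source of truth: MSDBD in the
fabrics-specific identify data.  Per NVMe over Fabrics, MSDBD is the
maximum number of SGL data block descriptors per command, and a value
of 0 means no limit, so the follow-up will need a local cap.  A sketch
of that future change (NVME_RDMA_MAX_SGES is a hypothetical cap; none
of this is in the commit):

    uint16_t
    nvme_rdma_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
    {
            uint8_t msdbd = ctrlr->cdata.nvmf_specific.msdbd;

            /* MSDBD == 0 means "no limit" in NVMe-oF, so fall back to a
             * local cap; otherwise honor the controller's advertised limit. */
            if (msdbd == 0 || msdbd > NVME_RDMA_MAX_SGES) {
                    return NVME_RDMA_MAX_SGES;
            }
            return msdbd;
    }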


@@ -145,6 +145,12 @@ nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
 	NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_io_queue_size, (ctrlr));
 }
 
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+	NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_sges, (ctrlr));
+}
+
 struct spdk_nvme_qpair *
 nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 				     const struct spdk_nvme_io_qpair_opts *opts)
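
NVME_TRANSPORT_CALL picks the transport-specific implementation based
on ctrlr->trid.trtype.  For this function the dispatch is roughly
equivalent to the following switch (a sketch of what the macro does,
not its literal expansion):

    uint16_t
    nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
    {
            switch (ctrlr->trid.trtype) {
            case SPDK_NVME_TRANSPORT_PCIE:
                    return nvme_pcie_ctrlr_get_max_sges(ctrlr);
            case SPDK_NVME_TRANSPORT_RDMA:
                    return nvme_rdma_ctrlr_get_max_sges(ctrlr);
            default:
                    abort();        /* unknown transport type */
            }
    }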


@@ -121,6 +121,12 @@ nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
 	return SPDK_NVME_IO_QUEUE_MAX_ENTRIES;
 }
 
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return 1;
+}
+
 struct spdk_nvme_qpair *
 nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 				     const struct spdk_nvme_io_qpair_opts *opts)
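
The constant here mirrors the RDMA placeholder: this file stubs the
transport layer for the unit tests, so code built on max_sges sees a
fixed value when no real transport is linked in.  A hypothetical
CUnit-style check (SPDK's unit tests use CUnit; this test is
illustrative, not from the commit):

    static void
    test_max_sges_stub(void)
    {
            struct spdk_nvme_ctrlr ctrlr = {0};

            /* The stub always reports a single SGE. */
            CU_ASSERT(nvme_transport_ctrlr_get_max_sges(&ctrlr) == 1);
    }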