nvmf: allow overriding virtual controller capabilities

Virtual controller capabilities can now be overridden at the
transport-specific layer. The current default behavior is preserved.

This can be useful to limit or extend the defaults based on the
transport type, as sketched in the example below.

Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Change-Id: I754f0d957a46f219adc1e55f792e79c7546ddb43
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1274
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Jacek Kalwas 2020-03-16 11:22:28 +01:00 committed by Tomasz Zawadzki
parent 20bc88d284
commit 538f1354e0
8 changed files with 77 additions and 47 deletions
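
For illustration, this is roughly how a transport plugs into the new hook: call spdk_nvmf_ctrlr_data_init() from its create callback to obtain the generic defaults, then adjust individual fields. This is only a sketch; "my_transport", its create function, and the msdbd value of 16 are made up for the example, while spdk_nvmf_ctrlr_data_init() and the transport cdata member come from this change.

#include <stdlib.h>
/* Also requires the SPDK nvmf internal header that declares
 * struct spdk_nvmf_transport and spdk_nvmf_ctrlr_data_init()
 * (its path is not shown in this diff). */

struct my_transport {
	struct spdk_nvmf_transport	transport;
	/* transport-private state would follow */
};

static struct spdk_nvmf_transport *
my_transport_create(struct spdk_nvmf_transport_opts *opts)
{
	struct my_transport *mtransport = calloc(1, sizeof(*mtransport));

	if (mtransport == NULL) {
		return NULL;
	}

	/* Fill in the spec-defined defaults (ioccsz, iorcsz, icdoff, msdbd, ...). */
	spdk_nvmf_ctrlr_data_init(opts, &mtransport->transport.cdata);

	/* Override whatever this transport supports differently, e.g. advertise
	 * more SGL descriptors per command than the generic default of 1. */
	mtransport->transport.cdata.nvmf_specific.msdbd = 16;

	return &mtransport->transport;
}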

View File

@@ -1525,6 +1525,30 @@ enum spdk_nvme_flush_broadcast {
#define SPDK_NVME_NQN_FIELD_SIZE 256
/** Identify Controller data NVMe over Fabrics-specific fields */
struct spdk_nvme_cdata_nvmf_specific {
/** I/O queue command capsule supported size (16-byte units) */
uint32_t ioccsz;
/** I/O queue response capsule supported size (16-byte units) */
uint32_t iorcsz;
/** In-capsule data offset (16-byte units) */
uint16_t icdoff;
/** Controller attributes */
struct {
/** Controller model: \ref spdk_nvmf_ctrlr_model */
uint8_t ctrlr_model : 1;
uint8_t reserved : 7;
} ctrattr;
/** Maximum SGL block descriptors (0 = no limit) */
uint8_t msdbd;
uint8_t reserved[244];
};
struct __attribute__((packed)) __attribute__((aligned)) spdk_nvme_ctrlr_data {
/* bytes 0-255: controller capabilities and features */
@@ -1875,29 +1899,7 @@ struct __attribute__((packed)) __attribute__((aligned)) spdk_nvme_ctrlr_data {
uint8_t reserved5[768];
/** NVMe over Fabrics-specific fields */
struct spdk_nvme_cdata_nvmf_specific nvmf_specific;
struct {
/** I/O queue command capsule supported size (16-byte units) */
uint32_t ioccsz;
/** I/O queue response capsule supported size (16-byte units) */
uint32_t iorcsz;
/** In-capsule data offset (16-byte units) */
uint16_t icdoff;
/** Controller attributes */
struct {
/** Controller model: \ref spdk_nvmf_ctrlr_model */
uint8_t ctrlr_model : 1;
uint8_t reserved : 7;
} ctrattr;
/** Maximum SGL block descriptors (0 = no limit) */
uint8_t msdbd;
uint8_t reserved[244];
} nvmf_specific;
/* bytes 2048-3071: power state descriptors */
struct spdk_nvme_power_state psd[32];

View File

@@ -169,10 +169,18 @@ struct spdk_nvmf_listener {
TAILQ_ENTRY(spdk_nvmf_listener) link;
};
/**
* A subset of struct spdk_nvme_ctrlr_data that is emulated by a fabrics device.
*/
struct spdk_nvmf_ctrlr_data {
struct spdk_nvme_cdata_nvmf_specific nvmf_specific;
};
struct spdk_nvmf_transport {
struct spdk_nvmf_tgt *tgt;
const struct spdk_nvmf_transport_ops *ops;
struct spdk_nvmf_transport_opts opts;
struct spdk_nvmf_ctrlr_data cdata;
/* A mempool for transport related data transfers */
struct spdk_mempool *data_buf_pool;
@@ -354,6 +362,20 @@ struct spdk_nvmf_registers {
uint64_t acq;
};
/**
* Initialize NVMe-oF controller capabilities.
*
* After this call, the transport-specific layer can override the settings,
* but it must internally enforce the conditions under which an update is
* allowed (e.g. no active connections).
*
* \param opts transport options
* \param cdata subset of ctrlr capabilities
*/
void
spdk_nvmf_ctrlr_data_init(struct spdk_nvmf_transport_opts *opts,
struct spdk_nvmf_ctrlr_data *cdata);
const struct spdk_nvmf_registers *spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,

View File

@@ -1813,6 +1813,17 @@ nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr,
NULL;
}
void
spdk_nvmf_ctrlr_data_init(struct spdk_nvmf_transport_opts *opts, struct spdk_nvmf_ctrlr_data *cdata)
{
cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
cdata->nvmf_specific.ioccsz += opts->in_capsule_data_size / 16;
cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
cdata->nvmf_specific.msdbd = 1;
}
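
As a quick check of the units used above (illustrative numbers only; 4096 bytes stands in for opts->in_capsule_data_size):

#include <stdio.h>

int main(void)
{
	unsigned sqe_size = 64;		/* sizeof(struct spdk_nvme_cmd), the submission queue entry */
	unsigned cqe_size = 16;		/* sizeof(struct spdk_nvme_cpl), the completion queue entry */
	unsigned icd_size = 4096;	/* example in-capsule data size */

	/* ioccsz and iorcsz are reported in 16-byte units */
	printf("ioccsz = %u\n", (sqe_size + icd_size) / 16);	/* 260 */
	printf("iorcsz = %u\n", cqe_size / 16);			/* 1 */
	return 0;
}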
int
spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
{
@@ -1867,23 +1878,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
cdata->vwc.present = 1;
cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED;
cdata->nvmf_specific = transport->cdata.nvmf_specific;
cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
/* The RDMA transport supports up to SPDK_NVMF_MAX_SGL_ENTRIES descriptors. */
if (transport->ops->type == SPDK_NVME_TRANSPORT_RDMA) {
cdata->nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
} else {
cdata->nvmf_specific.msdbd = 1;
}
/* TODO: this should be set by the transport */
/* Disable in-capsule data transfer for RDMA controller when dif_insert_or_strip is enabled
since in-capsule data only works with NVME drives that support SGL memory layout */
if (!(transport->ops->type == SPDK_NVME_TRANSPORT_RDMA && ctrlr->dif_insert_or_strip)) {
cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
}
cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr);
cdata->oncs.write_zeroes = spdk_nvmf_ctrlr_write_zeroes_supported(ctrlr);

View File

@@ -1889,6 +1889,8 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
/* initialize the low level FC driver */
nvmf_fc_lld_init();
spdk_nvmf_ctrlr_data_init(opts, &g_nvmf_ftransport->transport.cdata);
return &g_nvmf_ftransport->transport;
}

View File

@@ -2497,6 +2497,16 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
rtransport->poll_fds[i++].events = POLLIN;
}
spdk_nvmf_ctrlr_data_init(opts, &rtransport->transport.cdata);
rtransport->transport.cdata.nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
/* Disable in-capsule data transfer for RDMA controller when dif_insert_or_strip is enabled
since in-capsule data only works with NVME drives that support SGL memory layout */
if (opts->dif_insert_or_strip) {
rtransport->transport.cdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
}
return &rtransport->transport;
}

View File

@@ -514,6 +514,8 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}
spdk_nvmf_ctrlr_data_init(opts, &ttransport->transport.cdata);
pthread_mutex_init(&ttransport->lock, NULL);
return &ttransport->transport;

View File

@@ -1411,6 +1411,8 @@ test_identify_ctrlr(void)
struct spdk_nvme_ctrlr_data cdata = {};
uint32_t expected_ioccsz;
spdk_nvmf_ctrlr_data_init(&transport.opts, &transport.cdata);
/* Check ioccsz, TCP transport */
tops.type = SPDK_NVME_TRANSPORT_TCP;
expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
@@ -1429,13 +1431,6 @@ test_identify_ctrlr(void)
expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
/* Check ioccsz, RDMA transport with dif_insert_or_strip */
tops.type = SPDK_NVME_TRANSPORT_RDMA;
ctrlr.dif_insert_or_strip = true;
expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
}
static int

View File

@@ -73,6 +73,8 @@ DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
const struct spdk_nvme_transport_id *trid2), 0);