nvmf: move cdata subset from transport to ctrlr

With this change, the transport can decide about particular ctrlr attributes
per ctrlr rather than globally.

Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Change-Id: Ia3fb0d4e576cb9f8ce6df75f775e2fd5727d7f48
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2757
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Jacek Kalwas 2020-06-03 17:15:19 +02:00 committed by Tomasz Zawadzki
parent 0ba47879b5
commit 000e6f5b87
7 changed files with 51 additions and 49 deletions

View File

@ -182,7 +182,6 @@ struct spdk_nvmf_transport {
struct spdk_nvmf_tgt *tgt;
const struct spdk_nvmf_transport_ops *ops;
struct spdk_nvmf_transport_opts opts;
struct spdk_nvmf_ctrlr_data cdata;
/* A mempool for transport related data transfers */
struct spdk_mempool *data_buf_pool;
@ -253,6 +252,12 @@ struct spdk_nvmf_transport_ops {
*/
void (*accept)(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg);
/**
* Initialize subset of identify controller data.
*/
void (*cdata_init)(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvmf_ctrlr_data *cdata);
/**
* Fill out a discovery log entry for a specific listen address.
*/
@ -364,20 +369,6 @@ struct spdk_nvmf_registers {
uint64_t acq;
};
/**
* Initialize NVMe-oF controller capabilities.
*
* After that call transport specific layer can override the settings
* but internally must enforce the conditions on when it can be updated
* (e.g. no connections active).
*
* \param opts transport options
* \param cdata subset of ctrlr capabilities
*/
void
spdk_nvmf_ctrlr_data_init(struct spdk_nvmf_transport_opts *opts,
struct spdk_nvmf_ctrlr_data *cdata);
const struct spdk_nvmf_registers *spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,

View File

@ -276,6 +276,26 @@ _nvmf_subsystem_add_ctrlr(void *ctx)
spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
}
/*
 * Populate the generic (transport-independent) subset of the identify
 * controller data for a new ctrlr, then let the transport override it
 * via the optional cdata_init op.
 */
static void
nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvmf_ctrlr_data *cdata)
{
/* Keep Alive granularity; non-zero means Keep Alive is supported. */
cdata->kas = KAS_DEFAULT_VALUE;
cdata->sgls.supported = 1;
cdata->sgls.keyed_sgl = 1;
cdata->sgls.sgl_offset = 1;
/* ioccsz/iorcsz are expressed in 16-byte units (hence the / 16). */
cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
cdata->nvmf_specific.msdbd = 1;
/* Transport-specific overrides (e.g. RDMA bumps msdbd) are optional. */
if (transport->ops->cdata_init) {
transport->ops->cdata_init(transport, subsystem, cdata);
}
}
static struct spdk_nvmf_ctrlr *
nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvmf_request *req,
@ -303,11 +323,13 @@ nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
return NULL;
}
nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);
/*
* KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units.
* If this field is cleared to 0h, then Keep Alive is not supported.
*/
if (transport->cdata.kas) {
if (ctrlr->cdata.kas) {
ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
@ -1817,21 +1839,6 @@ nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr,
NULL;
}
/*
 * Removed by this commit: the per-transport cdata initializer, superseded
 * by the per-ctrlr nvmf_ctrlr_cdata_init() above. Kept defaults were
 * identical except that in_capsule_data_size came from opts directly.
 */
void
spdk_nvmf_ctrlr_data_init(struct spdk_nvmf_transport_opts *opts, struct spdk_nvmf_ctrlr_data *cdata)
{
cdata->kas = KAS_DEFAULT_VALUE;
cdata->sgls.supported = 1;
cdata->sgls.keyed_sgl = 1;
cdata->sgls.sgl_offset = 1;
/* ioccsz/iorcsz are expressed in 16-byte units (hence the / 16). */
cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
cdata->nvmf_specific.ioccsz += opts->in_capsule_data_size / 16;
cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
cdata->nvmf_specific.msdbd = 1;
}
int
spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
{
@ -1850,7 +1857,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
cdata->lpa.edlp = 1;
cdata->elpe = 127;
cdata->maxcmd = transport->opts.max_queue_depth;
cdata->sgls = transport->cdata.sgls;
cdata->sgls = ctrlr->cdata.sgls;
cdata->fuses.compare_and_write = 1;
cdata->acwu = 1;
spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0');
@ -1864,7 +1871,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
if (subsystem->subtype == SPDK_NVMF_SUBTYPE_NVME) {
spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' ');
spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' ');
cdata->kas = transport->cdata.kas;
cdata->kas = ctrlr->cdata.kas;
cdata->rab = 6;
cdata->cmic.multi_port = 1;
@ -1885,7 +1892,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
cdata->vwc.present = 1;
cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED;
cdata->nvmf_specific = transport->cdata.nvmf_specific;
cdata->nvmf_specific = ctrlr->cdata.nvmf_specific;
cdata->oncs.dsm = nvmf_ctrlr_dsm_supported(ctrlr);
cdata->oncs.write_zeroes = nvmf_ctrlr_write_zeroes_supported(ctrlr);

View File

@ -1865,8 +1865,6 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
/* initialize the low level FC driver */
nvmf_fc_lld_init();
spdk_nvmf_ctrlr_data_init(opts, &g_nvmf_ftransport->transport.cdata);
return &g_nvmf_ftransport->transport;
}

View File

@ -199,6 +199,8 @@ struct spdk_nvmf_ctrlr {
char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
struct spdk_nvmf_subsystem *subsys;
struct spdk_nvmf_ctrlr_data cdata;
struct spdk_nvmf_registers vcprop;
struct spdk_nvmf_ctrlr_feat feat;

View File

@ -2480,16 +2480,6 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
rtransport->poll_fds[i++].events = POLLIN;
}
spdk_nvmf_ctrlr_data_init(opts, &rtransport->transport.cdata);
rtransport->transport.cdata.nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
/* Disable in-capsule data transfer for RDMA controller when dif_insert_or_strip is enabled
since in-capsule data only works with NVME drives that support SGL memory layout */
if (opts->dif_insert_or_strip) {
rtransport->transport.cdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
}
return &rtransport->transport;
}
@ -3202,6 +3192,19 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void
assert(nfds == 0);
}
/*
 * RDMA implementation of the cdata_init transport op: overrides the
 * generic defaults set by nvmf_ctrlr_cdata_init() for RDMA ctrlrs.
 */
static void
nvmf_rdma_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvmf_ctrlr_data *cdata)
{
/* RDMA supports multiple SGL descriptors, unlike the generic default of 1. */
cdata->nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
/* Disable in-capsule data transfer for RDMA controller when dif_insert_or_strip is enabled
since in-capsule data only works with NVME drives that support SGL memory layout */
if (transport->opts.dif_insert_or_strip) {
/* Shrink ioccsz back to just the SQE size (16-byte units). */
cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
}
}
static void
nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
struct spdk_nvme_transport_id *trid,
@ -4086,6 +4089,7 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
.listen = nvmf_rdma_listen,
.stop_listen = nvmf_rdma_stop_listen,
.accept = nvmf_rdma_accept,
.cdata_init = nvmf_rdma_cdata_init,
.listener_discover = nvmf_rdma_discover,

View File

@ -514,8 +514,6 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}
spdk_nvmf_ctrlr_data_init(opts, &ttransport->transport.cdata);
pthread_mutex_init(&ttransport->lock, NULL);
return &ttransport->transport;

View File

@ -301,6 +301,7 @@ test_connect(void)
struct spdk_nvmf_poll_group group;
struct spdk_nvmf_subsystem_poll_group *sgroups;
struct spdk_nvmf_transport transport;
struct spdk_nvmf_transport_ops tops = {};
struct spdk_nvmf_subsystem subsystem;
struct spdk_nvmf_request req;
struct spdk_nvmf_qpair admin_qpair;
@ -334,6 +335,7 @@ test_connect(void)
memset(&tgt, 0, sizeof(tgt));
memset(&transport, 0, sizeof(transport));
transport.ops = &tops;
transport.opts.max_aq_depth = 32;
transport.opts.max_queue_depth = 64;
transport.opts.max_qpairs_per_ctrlr = 3;
@ -1412,7 +1414,7 @@ test_identify_ctrlr(void)
struct spdk_nvme_ctrlr_data cdata = {};
uint32_t expected_ioccsz;
spdk_nvmf_ctrlr_data_init(&transport.opts, &transport.cdata);
nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
/* Check ioccsz, TCP transport */
tops.type = SPDK_NVME_TRANSPORT_TCP;