nvmf: Move target opts to transport opts (part 1)
- Move most of the target opts from nvmf_tgt to nvmf_transport.
- Update transport create functions to pass in transport opts.
- When transport opts are NULL in the transport create function, use the
  target opts (for backward compatibility).
- Part 1 of 2 patches. Part 2 (to follow after part 1 is accepted) will
  allow independent creation of a transport with specific opts while
  maintaining backward compatibility with current apps and RPC
  configuration that still use the add-listener method to create a
  transport.

Change-Id: I0e27447c4a98e0b6a6c590541404b4e4be879b47
Signed-off-by: John Barnard <john.barnard@broadcom.com>
Reviewed-on: https://review.gerrithub.io/423329
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit 8e8084903e (parent d393983d74)
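Before the diff, a minimal sketch of how a caller could use the reworked create path. The helper name and the option values are hypothetical; the types and the spdk_nvmf_transport_create() signature come from the headers patched in this commit (spdk_nvmf_transport_opts in the public nvmf header, the create prototype in the internal transport header):

/* Hypothetical example; values are illustrative only. */
static struct spdk_nvmf_transport *
example_create_rdma_transport(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_transport_opts opts = {
		.max_queue_depth	= 128,
		.max_qpairs_per_ctrlr	= 64,
		.in_capsule_data_size	= 4096,
		.max_io_size		= 131072,
		.io_unit_size		= 8192,
		.max_aq_depth		= 32,
	};

	/* Explicit opts: the transport is configured independently of
	 * the global target opts. Passing NULL instead of &opts would
	 * fall back to tgt->opts, which is the backward-compatible path
	 * spdk_nvmf_tgt_listen() uses in this patch. */
	return spdk_nvmf_transport_create(tgt, SPDK_NVME_TRANSPORT_RDMA, &opts);
}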
@@ -61,6 +61,7 @@ struct spdk_nvmf_host;
 struct spdk_nvmf_listener;
 struct spdk_nvmf_poll_group;
 struct spdk_json_write_ctx;
+struct spdk_nvmf_transport;
 
 struct spdk_nvmf_tgt_opts {
 	uint16_t max_queue_depth;
@@ -70,6 +71,16 @@ struct spdk_nvmf_tgt_opts {
 	uint32_t max_subsystems;
 	uint32_t io_unit_size;
 };
 
+struct spdk_nvmf_transport_opts {
+	uint16_t max_queue_depth;
+	uint16_t max_qpairs_per_ctrlr;
+	uint32_t in_capsule_data_size;
+	uint32_t max_io_size;
+	uint32_t io_unit_size;
+	uint32_t max_aq_depth;
+};
+
 /**
  * Initialize the default value of opts.
  *
@@ -152,9 +152,7 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 		       struct spdk_nvmf_fabric_connect_data *connect_data)
 {
 	struct spdk_nvmf_ctrlr *ctrlr;
-	struct spdk_nvmf_tgt *tgt;
-
-	tgt = subsystem->tgt;
+	struct spdk_nvmf_transport *transport;
 
 	ctrlr = calloc(1, sizeof(*ctrlr));
 	if (ctrlr == NULL) {
@@ -166,7 +164,8 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 	ctrlr->subsys = subsystem;
 	ctrlr->thread = req->qpair->group->thread;
 
-	ctrlr->qpair_mask = spdk_bit_array_create(tgt->opts.max_qpairs_per_ctrlr);
+	transport = req->qpair->transport;
+	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
 	if (!ctrlr->qpair_mask) {
 		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
 		free(ctrlr);
@@ -178,14 +177,17 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 	ctrlr->feat.volatile_write_cache.bits.wce = 1;
 
 	/* Subtract 1 for admin queue, 1 for 0's based */
-	ctrlr->feat.number_of_queues.bits.ncqr = tgt->opts.max_qpairs_per_ctrlr - 1 - 1;
-	ctrlr->feat.number_of_queues.bits.nsqr = tgt->opts.max_qpairs_per_ctrlr - 1 - 1;
+	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 -
+			1;
+	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 -
+			1;
 
 	memcpy(ctrlr->hostid, connect_data->hostid, sizeof(ctrlr->hostid));
 
 	ctrlr->vcprop.cap.raw = 0;
 	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
-	ctrlr->vcprop.cap.bits.mqes = tgt->opts.max_queue_depth - 1; /* max queue depth */
+	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth -
+				      1; /* max queue depth */
 	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
 	ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
 	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
@@ -376,9 +378,9 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
 	 * strictly less than max_queue_depth.
 	 */
-	if (cmd->sqsize == 0 || cmd->sqsize >= tgt->opts.max_queue_depth) {
+	if (cmd->sqsize == 0 || cmd->sqsize >= qpair->transport->opts.max_queue_depth) {
 		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
-			    cmd->sqsize, tgt->opts.max_queue_depth - 1);
+			    cmd->sqsize, qpair->transport->opts.max_queue_depth - 1);
 		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 	}
@@ -1151,19 +1153,19 @@ static int
 spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
 {
 	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
-	struct spdk_nvmf_tgt *tgt = subsystem->tgt;
+	struct spdk_nvmf_transport *transport = ctrlr->admin_qpair->transport;
 
 	/*
 	 * Common fields for discovery and NVM subsystems
 	 */
 	spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' ');
-	assert((tgt->opts.max_io_size % 4096) == 0);
-	cdata->mdts = spdk_u32log2(tgt->opts.max_io_size / 4096);
+	assert((transport->opts.max_io_size % 4096) == 0);
+	cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096);
 	cdata->cntlid = ctrlr->cntlid;
 	cdata->ver = ctrlr->vcprop.vs;
 	cdata->lpa.edlp = 1;
 	cdata->elpe = 127;
-	cdata->maxcmd = tgt->opts.max_queue_depth;
+	cdata->maxcmd = transport->opts.max_queue_depth;
 	cdata->sgls.supported = 1;
 	cdata->sgls.keyed_sgl = 1;
 	cdata->sgls.sgl_offset = 1;
@@ -1206,7 +1208,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
 	cdata->nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
 
 	/* TODO: this should be set by the transport */
-	cdata->nvmf_specific.ioccsz += tgt->opts.in_capsule_data_size / 16;
+	cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
 
 	cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr);
 	cdata->oncs.write_zeroes = spdk_nvmf_ctrlr_write_zeroes_supported(ctrlr);
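Taken together, these hunks make every controller-visible limit (the qpair mask size, NCQR/NSQR, MQES in the CAP register, MDTS, MAXCMD, and IOCCSZ) derive from the transport that owns the controller's qpairs instead of from the global target opts.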
@@ -96,7 +96,7 @@ nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt)
 		memset(entry, 0, sizeof(*entry));
 		entry->portid = numrec;
 		entry->cntlid = 0xffff;
-		entry->asqsz = tgt->opts.max_queue_depth;
+		entry->asqsz = listener->transport->opts.max_aq_depth;
 		entry->subtype = subsystem->subtype;
 		snprintf(entry->subnqn, sizeof(entry->subnqn), "%s", subsystem->subnqn);
 
@@ -234,14 +234,6 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
 		tgt->opts = *opts;
 	}
 
-	if ((tgt->opts.max_io_size % tgt->opts.io_unit_size != 0) ||
-	    (tgt->opts.max_io_size / tgt->opts.io_unit_size > SPDK_NVMF_MAX_SGL_ENTRIES)) {
-		SPDK_ERRLOG("Unsupported IO size, MaxIO:%d, UnitIO:%d\n", tgt->opts.max_io_size,
-			    tgt->opts.io_unit_size);
-		free(tgt);
-		return NULL;
-	}
-
 	tgt->discovery_genctr = 0;
 	tgt->discovery_log_page = NULL;
 	tgt->discovery_log_page_size = 0;
@@ -258,14 +250,6 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
 					spdk_nvmf_tgt_destroy_poll_group,
 					sizeof(struct spdk_nvmf_poll_group));
 
-	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max Queue Pairs Per Controller: %d\n",
-		      tgt->opts.max_qpairs_per_ctrlr);
-	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max Queue Depth: %d\n", tgt->opts.max_queue_depth);
-	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max In Capsule Data: %d bytes\n",
-		      tgt->opts.in_capsule_data_size);
-	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Max I/O Size: %d bytes\n", tgt->opts.max_io_size);
-	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "I/O Unit Size: %d bytes\n", tgt->opts.io_unit_size);
-
 	return tgt;
 }
 
@@ -493,7 +477,7 @@ spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
 
 	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
 	if (!transport) {
-		transport = spdk_nvmf_transport_create(tgt, trid->trtype);
+		transport = spdk_nvmf_transport_create(tgt, trid->trtype, NULL);
 		if (!transport) {
 			SPDK_ERRLOG("Transport initialization failed\n");
 			cb_fn(cb_arg, -EINVAL);
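Note that the I/O-size sanity check and the opts debug logging removed from the target create path above are not lost: an equivalent check reappears in spdk_nvmf_transport_create() and equivalent logging in the RDMA transport init further down, applied per transport rather than once per target.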
@@ -316,11 +316,6 @@ struct spdk_nvmf_rdma_transport {
 
 	pthread_mutex_t lock;
 
-	uint16_t max_queue_depth;
-	uint32_t max_io_size;
-	uint32_t io_unit_size;
-	uint32_t in_capsule_data_size;
-
 	/* fields used to poll RDMA/IB events */
 	nfds_t npoll_fds;
 	struct pollfd *poll_fds;
@@ -551,9 +546,11 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 	int rc, i;
 	struct spdk_nvmf_rdma_recv *rdma_recv;
 	struct spdk_nvmf_rdma_request *rdma_req;
+	struct spdk_nvmf_transport *transport;
 
 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
+	transport = &rtransport->transport;
 
 	memset(&rqpair->ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
 	rqpair->ibv_init_attr.qp_context = rqpair;
@@ -584,13 +581,15 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 	rqpair->cpls = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cpls),
 					0x1000, NULL);
 
-	if (rtransport->in_capsule_data_size) {
-		rqpair->bufs = spdk_dma_zmalloc(rqpair->max_queue_depth * rtransport->in_capsule_data_size,
+	if (transport->opts.in_capsule_data_size > 0) {
+		rqpair->bufs = spdk_dma_zmalloc(rqpair->max_queue_depth *
+						transport->opts.in_capsule_data_size,
 						0x1000, NULL);
 	}
 
 	if (!rqpair->reqs || !rqpair->recvs || !rqpair->cmds ||
-	    !rqpair->cpls || (rtransport->in_capsule_data_size && !rqpair->bufs)) {
+	    !rqpair->cpls || (transport->opts.in_capsule_data_size && !rqpair->bufs)) {
 		SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
 		spdk_nvmf_rdma_qpair_destroy(rqpair);
 		return -1;
@@ -603,14 +602,14 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 				      rqpair->max_queue_depth * sizeof(*rqpair->cpls),
 				      0);
 
-	if (rtransport->in_capsule_data_size) {
+	if (transport->opts.in_capsule_data_size) {
 		rqpair->bufs_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->bufs,
-					     rqpair->max_queue_depth * rtransport->in_capsule_data_size,
-					     IBV_ACCESS_LOCAL_WRITE |
-					     IBV_ACCESS_REMOTE_WRITE);
+					     rqpair->max_queue_depth *
+					     transport->opts.in_capsule_data_size,
+					     IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
 	}
 
-	if (!rqpair->cmds_mr || !rqpair->cpls_mr || (rtransport->in_capsule_data_size &&
+	if (!rqpair->cmds_mr || !rqpair->cpls_mr || (transport->opts.in_capsule_data_size &&
 			!rqpair->bufs_mr)) {
 		SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
 		spdk_nvmf_rdma_qpair_destroy(rqpair);
@@ -622,7 +621,8 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 		      rqpair->cpls, rqpair->max_queue_depth * sizeof(*rqpair->cpls), rqpair->cpls_mr->lkey);
 	if (rqpair->bufs && rqpair->bufs_mr) {
 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
-			      rqpair->bufs, rqpair->max_queue_depth * rtransport->in_capsule_data_size, rqpair->bufs_mr->lkey);
+			      rqpair->bufs, rqpair->max_queue_depth *
+			      transport->opts.in_capsule_data_size, rqpair->bufs_mr->lkey);
 	}
 
 	/* Initialise request state queues and counters of the queue pair */
@@ -639,7 +639,8 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 
 		/* Set up memory to receive commands */
 		if (rqpair->bufs) {
-			rdma_recv->buf = (void *)((uintptr_t)rqpair->bufs + (i * rtransport->in_capsule_data_size));
+			rdma_recv->buf = (void *)((uintptr_t)rqpair->bufs + (i *
+					 transport->opts.in_capsule_data_size));
 		}
 
 		rdma_recv->sgl[0].addr = (uintptr_t)&rqpair->cmds[i];
@@ -649,7 +650,7 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 
 		if (rdma_recv->buf && rqpair->bufs_mr) {
 			rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
-			rdma_recv->sgl[1].length = rtransport->in_capsule_data_size;
+			rdma_recv->sgl[1].length = transport->opts.in_capsule_data_size;
 			rdma_recv->sgl[1].lkey = rqpair->bufs_mr->lkey;
 			rdma_recv->wr.num_sge++;
 		}
@@ -876,9 +877,10 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Calculating Queue Depth\n");
 
 	/* Start with the maximum queue depth allowed by the target */
-	max_queue_depth = rtransport->max_queue_depth;
-	max_rw_depth = rtransport->max_queue_depth;
-	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n", rtransport->max_queue_depth);
+	max_queue_depth = rtransport->transport.opts.max_queue_depth;
+	max_rw_depth = rtransport->transport.opts.max_queue_depth;
+	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n",
+		      rtransport->transport.opts.max_queue_depth);
 
 	/* Next check the local NIC's hardware limitations */
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
@@ -1091,7 +1093,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 
 		rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
 						~NVMF_DATA_BUFFER_MASK);
-		rdma_req->req.iov[i].iov_len = spdk_min(length, rtransport->io_unit_size);
+		rdma_req->req.iov[i].iov_len = spdk_min(length, rtransport->transport.opts.io_unit_size);
 		rdma_req->req.iovcnt++;
 		rdma_req->data.buffers[i] = buf;
 		rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base);
@@ -1138,9 +1140,9 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
 	    (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
 	     sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
-		if (sgl->keyed.length > rtransport->max_io_size) {
+		if (sgl->keyed.length > rtransport->transport.opts.max_io_size) {
 			SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
-				    sgl->keyed.length, rtransport->max_io_size);
+				    sgl->keyed.length, rtransport->transport.opts.max_io_size);
 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
 			return -1;
 		}
@@ -1185,7 +1187,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
 		uint64_t offset = sgl->address;
-		uint32_t max_len = rtransport->in_capsule_data_size;
+		uint32_t max_len = rtransport->transport.opts.in_capsule_data_size;
 
 		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
 			      offset, sgl->unkeyed.length);
@@ -1438,7 +1440,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
 
 static struct spdk_nvmf_transport *
-spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
+spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 {
 	int rc;
 	struct spdk_nvmf_rdma_transport *rtransport;
@@ -1466,24 +1468,27 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 	TAILQ_INIT(&rtransport->devices);
 	TAILQ_INIT(&rtransport->ports);
 
-	rtransport->transport.tgt = tgt;
 	rtransport->transport.ops = &spdk_nvmf_transport_rdma;
 
-	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n");
-
-	rtransport->max_queue_depth = tgt->opts.max_queue_depth;
-	rtransport->max_io_size = tgt->opts.max_io_size;
-	rtransport->io_unit_size = tgt->opts.io_unit_size;
-	rtransport->in_capsule_data_size = tgt->opts.in_capsule_data_size;
+	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
+		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
+		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
+		     "  in_capsule_data_size=%d, max_aq_depth=%d\n",
+		     opts->max_queue_depth,
+		     opts->max_io_size,
+		     opts->max_qpairs_per_ctrlr,
+		     opts->io_unit_size,
+		     opts->in_capsule_data_size,
+		     opts->max_aq_depth);
 
 	/* I/O unit size cannot be larger than max I/O size */
-	if (rtransport->io_unit_size > rtransport->max_io_size) {
-		rtransport->io_unit_size = rtransport->max_io_size;
+	if (opts->io_unit_size > opts->max_io_size) {
+		opts->io_unit_size = opts->max_io_size;
 	}
 
-	sge_count = rtransport->max_io_size / rtransport->io_unit_size;
+	sge_count = opts->max_io_size / opts->io_unit_size;
 	if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
-		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", rtransport->io_unit_size);
+		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
 		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
@@ -1504,8 +1509,8 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 	}
 
 	rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
-				    rtransport->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. */
-				    rtransport->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
+				    opts->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. */
+				    opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
 				    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
 				    SPDK_ENV_SOCKET_ID_ANY);
 	if (!rtransport->data_buf_pool) {
@@ -1631,10 +1636,13 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 		free(device);
 	}
 
-	if (spdk_mempool_count(rtransport->data_buf_pool) != (rtransport->max_queue_depth * 4)) {
-		SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
-			    spdk_mempool_count(rtransport->data_buf_pool),
-			    rtransport->max_queue_depth * 4);
+	if (rtransport->data_buf_pool != NULL) {
+		if (spdk_mempool_count(rtransport->data_buf_pool) !=
+		    (transport->opts.max_queue_depth * 4)) {
+			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
+				    spdk_mempool_count(rtransport->data_buf_pool),
+				    transport->opts.max_queue_depth * 4);
+		}
 	}
 
 	spdk_mempool_free(rtransport->data_buf_pool);
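With its duplicated max_queue_depth, max_io_size, io_unit_size, and in_capsule_data_size fields removed, the RDMA transport now has a single source of truth for these limits: the generic transport's opts, reached either through a local transport pointer or as rtransport->transport.opts.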
@@ -49,34 +49,67 @@ static const struct spdk_nvmf_transport_ops *const g_transport_ops[] = {
 
 #define NUM_TRANSPORTS (SPDK_COUNTOF(g_transport_ops))
 
-struct spdk_nvmf_transport *
-spdk_nvmf_transport_create(struct spdk_nvmf_tgt *tgt,
-			   enum spdk_nvme_transport_type type)
+static inline const struct spdk_nvmf_transport_ops *
+spdk_nvmf_get_transport_ops(enum spdk_nvme_transport_type type)
 {
 	size_t i;
-	const struct spdk_nvmf_transport_ops *ops = NULL;
-	struct spdk_nvmf_transport *transport;
-
 	for (i = 0; i != NUM_TRANSPORTS; i++) {
 		if (g_transport_ops[i]->type == type) {
-			ops = g_transport_ops[i];
-			break;
+			return g_transport_ops[i];
 		}
 	}
+	return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(struct spdk_nvmf_tgt *tgt,
+			   enum spdk_nvme_transport_type type,
+			   struct spdk_nvmf_transport_opts *opts)
+{
+	const struct spdk_nvmf_transport_ops *ops = NULL;
+	struct spdk_nvmf_transport *transport;
+	struct spdk_nvmf_transport_opts tgt_opts;
+
+	if (opts == NULL) {
+		/* get transport opts from global target opts */
+		tgt_opts.max_queue_depth = tgt->opts.max_queue_depth;
+		tgt_opts.max_qpairs_per_ctrlr = tgt->opts.max_qpairs_per_ctrlr;
+		tgt_opts.in_capsule_data_size = tgt->opts.in_capsule_data_size;
+		tgt_opts.max_io_size = tgt->opts.max_io_size;
+		tgt_opts.io_unit_size = tgt->opts.io_unit_size;
+		tgt_opts.max_aq_depth = tgt->opts.max_queue_depth;
+		opts = &tgt_opts;
+	}
+
+	if ((opts->max_io_size % opts->io_unit_size != 0) ||
+	    (opts->max_io_size / opts->io_unit_size >
+	     SPDK_NVMF_MAX_SGL_ENTRIES)) {
+		SPDK_ERRLOG("%s: invalid IO size, MaxIO:%d, UnitIO:%d, MaxSGL:%d\n",
+			    spdk_nvme_transport_id_trtype_str(type),
+			    opts->max_io_size,
+			    opts->io_unit_size,
+			    SPDK_NVMF_MAX_SGL_ENTRIES);
+		return NULL;
+	}
+
+	ops = spdk_nvmf_get_transport_ops(type);
 	if (!ops) {
 		SPDK_ERRLOG("Transport type %s unavailable.\n",
 			    spdk_nvme_transport_id_trtype_str(type));
 		return NULL;
 	}
 
-	transport = ops->create(tgt);
+	transport = ops->create(opts);
 	if (!transport) {
 		SPDK_ERRLOG("Unable to create new transport of type %s\n",
 			    spdk_nvme_transport_id_trtype_str(type));
 		return NULL;
 	}
 
+	transport->tgt = tgt;
+	transport->ops = ops;
+	transport->opts = *opts;
+
 	return transport;
 }
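Two details of the rewritten create path are worth calling out. First, when opts is NULL the fallback copies each field from tgt->opts; since the target opts have no separate admin-queue depth, max_aq_depth is seeded from tgt->opts.max_queue_depth. Second, the validation requires max_io_size to be a multiple of io_unit_size and to span at most SPDK_NVMF_MAX_SGL_ENTRIES units; for instance, max_io_size = 131072 with io_unit_size = 8192 gives 131072 / 8192 = 16 SGL entries, which passes only if SPDK_NVMF_MAX_SGL_ENTRIES is at least 16.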
@@ -42,6 +42,7 @@
 struct spdk_nvmf_transport {
 	struct spdk_nvmf_tgt *tgt;
 	const struct spdk_nvmf_transport_ops *ops;
+	struct spdk_nvmf_transport_opts opts;
 
 	TAILQ_ENTRY(spdk_nvmf_transport) link;
 };
@@ -53,9 +54,9 @@ struct spdk_nvmf_transport_ops {
 	enum spdk_nvme_transport_type type;
 
 	/**
-	 * Create a transport for the given target
+	 * Create a transport for the given transport opts
	 */
-	struct spdk_nvmf_transport *(*create)(struct spdk_nvmf_tgt *tgt);
+	struct spdk_nvmf_transport *(*create)(struct spdk_nvmf_transport_opts *opts);
 
 	/**
 	 * Destroy the transport
@@ -132,7 +133,9 @@ struct spdk_nvmf_transport_ops {
 };
 
 struct spdk_nvmf_transport *spdk_nvmf_transport_create(struct spdk_nvmf_tgt *tgt,
-		enum spdk_nvme_transport_type type);
+		enum spdk_nvme_transport_type type,
+		struct spdk_nvmf_transport_opts *opts);
 
 int spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport);
 
 int spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
@@ -283,10 +283,9 @@ test_connect(void)
 	admin_qpair.group = &group;
 
 	memset(&tgt, 0, sizeof(tgt));
-	tgt.opts.max_queue_depth = 64;
-	tgt.opts.max_qpairs_per_ctrlr = 3;
-
 	memset(&transport, 0, sizeof(transport));
+	transport.opts.max_queue_depth = 64;
+	transport.opts.max_qpairs_per_ctrlr = 3;
 	transport.tgt = &tgt;
 
 	memset(&qpair, 0, sizeof(qpair));
@@ -111,7 +111,8 @@ static struct spdk_nvmf_transport g_transport = {};
 
 struct spdk_nvmf_transport *
 spdk_nvmf_transport_create(struct spdk_nvmf_tgt *tgt,
-			   enum spdk_nvme_transport_type type)
+			   enum spdk_nvme_transport_type type,
+			   struct spdk_nvmf_transport_opts *tprt_opts)
 {
 	if (type == SPDK_NVME_TRANSPORT_RDMA) {
 		g_transport.tgt = tgt;
@@ -103,7 +103,8 @@ static struct spdk_nvmf_transport g_transport = {};
 
 struct spdk_nvmf_transport *
 spdk_nvmf_transport_create(struct spdk_nvmf_tgt *tgt,
-			   enum spdk_nvme_transport_type type)
+			   enum spdk_nvme_transport_type type,
+			   struct spdk_nvmf_transport_opts *tprt_opts)
 {
 	if (type == SPDK_NVME_TRANSPORT_RDMA) {
 		g_transport.tgt = tgt;