nvmf: Add config options for inline and max I/O size

These options don't take effect quite yet, but this
change pipes the configuration file data through to
where it will be needed.

Change-Id: I95512d718d45b936fa85c03c0b80689ce3c866bc
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Ben Walker 2016-07-25 14:22:58 -07:00
parent 3d52e57cd0
commit 296add8bb1
6 changed files with 76 additions and 24 deletions

View File

@@ -33,6 +33,12 @@
# Set the maximum number of outstanding I/O per queue.
#MaxQueueDepth 128
# Set the maximum in-capsule data size. Must be a multiple of 16.
#InCapsuleDataSize 4096
# Set the maximum I/O size. Must be a multiple of 4096.
#MaxIOSize 131072
# Define an NVMf Subsystem.
# - NQN is required and must be unique.
# - Mode may be either "Direct" or "Virtual". Direct means that physical
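
For illustration only (not part of this commit), an [Nvmf] section that sets the new options might look like the following; the values are arbitrary choices within the limits enforced by the parsing code later in this commit:

[Nvmf]
# Multiple of 16 and within the 4096-131072 range, so it is accepted as-is.
InCapsuleDataSize 8192
# Multiple of 4096; 131072 is also the maximum (and default) value.
MaxIOSize 131072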

View File

@@ -68,6 +68,14 @@ struct spdk_nvmf_probe_ctx {
#define SPDK_NVMF_CONFIG_QUEUE_DEPTH_MIN 16
#define SPDK_NVMF_CONFIG_QUEUE_DEPTH_MAX 1024
#define SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_DEFAULT 131072
#define SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN 4096
#define SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX 131072
#define SPDK_NVMF_CONFIG_MAX_IO_SIZE_DEFAULT 131072
#define SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN 4096
#define SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX 131072
static int
spdk_add_nvmf_discovery_subsystem(void)
{
@@ -89,6 +97,8 @@ spdk_nvmf_parse_nvmf_tgt(void)
struct spdk_conf_section *sp;
int max_queue_depth;
int max_queues_per_sess;
int in_capsule_data_size;
int max_io_size;
int rc;
sp = spdk_conf_find_section(NULL, "Nvmf");
@@ -111,7 +121,27 @@ spdk_nvmf_parse_nvmf_tgt(void)
max_queues_per_sess = nvmf_max(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MIN);
max_queues_per_sess = nvmf_min(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MAX);
rc = nvmf_tgt_init(max_queue_depth, max_queues_per_sess);
in_capsule_data_size = spdk_conf_section_get_intval(sp, "InCapsuleDataSize");
if (in_capsule_data_size < 0) {
in_capsule_data_size = SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_DEFAULT;
} else if ((in_capsule_data_size % 16) != 0) {
SPDK_ERRLOG("InCapsuleDataSize must be a multiple of 16\n");
return -1;
}
in_capsule_data_size = nvmf_max(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN);
in_capsule_data_size = nvmf_min(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX);
max_io_size = spdk_conf_section_get_intval(sp, "MaxIOSize");
if (max_io_size < 0) {
max_io_size = SPDK_NVMF_CONFIG_MAX_IO_SIZE_DEFAULT;
} else if ((max_io_size % 4096) != 0) {
SPDK_ERRLOG("MaxIOSize must be a multiple of 4096\n");
return -1;
}
max_io_size = nvmf_max(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN);
max_io_size = nvmf_min(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX);
rc = nvmf_tgt_init(max_queue_depth, max_queues_per_sess, in_capsule_data_size, max_io_size);
if (rc != 0) {
SPDK_ERRLOG("nvmf_tgt_init() failed\n");
return rc;
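
As a hedged sketch (the helper name below is invented, not from this commit), the validate-and-clamp flow above can be read in isolation: a value that passes the multiple-of-16 check but falls below the minimum is silently raised rather than rejected.

#define nvmf_min(a,b) (((a)<(b))?(a):(b))
#define nvmf_max(a,b) (((a)>(b))?(a):(b))
#define SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN 4096
#define SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX 131072

static int
clamp_in_capsule_data_size(int in_capsule_data_size)
{
        /* Example: 1024 is a multiple of 16, so validation passes;
         * nvmf_max() then raises it to the 4096 minimum. */
        in_capsule_data_size = nvmf_max(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN);
        in_capsule_data_size = nvmf_min(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX);
        return in_capsule_data_size;
}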

View File

@@ -116,12 +116,20 @@ spdk_nvmf_check_pools(void)
}
int
nvmf_tgt_init(int max_queue_depth, int max_queues_per_sess)
nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
uint32_t in_capsule_data_size, uint32_t max_io_size)
{
int rc;
g_nvmf_tgt.max_queues_per_session = max_queues_per_sess;
g_nvmf_tgt.max_queue_depth = max_queue_depth;
g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
g_nvmf_tgt.max_io_size = max_io_size;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);
/* init nvmf specific config options */
if (!g_nvmf_tgt.sin_port) {

View File

@@ -46,14 +46,6 @@
#define nvmf_min(a,b) (((a)<(b))?(a):(b))
#define nvmf_max(a,b) (((a)>(b))?(a):(b))
#define DEFAULT_BB_SIZE (128 * 1024)
/*
* NVMf target supports a maximum transfer size that is equal to
* a single allocated bounce buffer per request.
*/
#define SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE DEFAULT_BB_SIZE
#define SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE 1
#define SPDK_NVMF_DEFAULT_SIN_PORT ((uint16_t)4420)
@@ -71,13 +63,27 @@
#define TRACE_NVMF_IO_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_NVMF, 0x8)
struct spdk_nvmf_globals {
int max_queue_depth;
int max_queues_per_session;
uint16_t max_queue_depth;
uint16_t max_queues_per_session;
uint32_t in_capsule_data_size;
uint32_t max_io_size;
uint16_t sin_port;
};
int nvmf_tgt_init(int max_queue_depth, int max_conn_per_sess);
int nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_conn_per_sess,
uint32_t in_capsule_data_size, uint32_t max_io_size);
static inline uint32_t
nvmf_u32log2(uint32_t x)
{
if (x == 0) {
/* __builtin_clz(0) is undefined, so just bail */
return 0;
}
return 31u - __builtin_clz(x);
}
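
A small usage sketch for the helper above (illustrative only; assumes <assert.h> and the nvmf_u32log2() definition just shown):

#include <assert.h>

static void
nvmf_u32log2_examples(void)
{
        assert(nvmf_u32log2(32) == 5);  /* 131072 / 4096 == 32; see the MDTS calculation later */
        assert(nvmf_u32log2(48) == 5);  /* non-powers of two round down */
        assert(nvmf_u32log2(0) == 0);   /* guarded case, since __builtin_clz(0) is undefined */
}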
extern struct spdk_nvmf_globals g_nvmf_tgt;

View File

@@ -68,7 +68,7 @@
struct spdk_nvmf_rdma_request {
struct spdk_nvmf_request req;
/* Inline data buffer of size DEFAULT_BB_SIZE */
/* In Capsule data buffer */
void *buf;
};
@@ -100,9 +100,9 @@ struct spdk_nvmf_rdma_conn {
union nvmf_c2h_msg *cpls;
struct ibv_mr *cpls_mr;
/* Array of size "queue_depth * DEFAULT_BB_SIZE" containing
* buffers to be used for inline data. TODO: Currently, all data
* is inline.
/* Array of size "queue_depth * InCapsuleDataSize" containing
* buffers to be used for in capsule data. TODO: Currently, all data
* is in capsule.
*/
void *bufs;
struct ibv_mr *bufs_mr;
@@ -248,7 +248,7 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, uint16_t queue_depth)
rdma_conn->cpls = rte_calloc("nvmf_rdma_cpl", rdma_conn->queue_depth,
sizeof(*rdma_conn->cpls), 0);
rdma_conn->bufs = rte_calloc("nvmf_rdma_buf", rdma_conn->queue_depth,
DEFAULT_BB_SIZE, 0);
g_nvmf_tgt.in_capsule_data_size, 0);
if (!rdma_conn->reqs || !rdma_conn->cmds || !rdma_conn->cpls || !rdma_conn->bufs) {
SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
spdk_nvmf_rdma_conn_destroy(rdma_conn);
@@ -260,7 +260,7 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, uint16_t queue_depth)
rdma_conn->cpls_mr = rdma_reg_msgs(rdma_conn->cm_id, rdma_conn->cpls,
queue_depth * sizeof(*rdma_conn->cpls));
rdma_conn->bufs_mr = rdma_reg_msgs(rdma_conn->cm_id, rdma_conn->bufs,
rdma_conn->queue_depth * DEFAULT_BB_SIZE);
rdma_conn->queue_depth * g_nvmf_tgt.in_capsule_data_size);
if (!rdma_conn->cmds_mr || !rdma_conn->cpls_mr || !rdma_conn->bufs_mr) {
SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
spdk_nvmf_rdma_conn_destroy(rdma_conn);
@@ -270,7 +270,7 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, uint16_t queue_depth)
for (i = 0; i < queue_depth; i++) {
rdma_req = &rdma_conn->reqs[i];
rdma_req->buf = (void *)((uintptr_t)rdma_conn->bufs + (i * DEFAULT_BB_SIZE));
rdma_req->buf = (void *)((uintptr_t)rdma_conn->bufs + (i * g_nvmf_tgt.in_capsule_data_size));
rdma_req->req.cmd = &rdma_conn->cmds[i];
rdma_req->req.rsp = &rdma_conn->cpls[i];
rdma_req->req.conn = &rdma_conn->conn;
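
To make the buffer carving above concrete, here is a hedged sketch of the layout; the helper name is invented, plain calloc() stands in for rte_calloc(), and the example numbers come from the config template (MaxQueueDepth 128, InCapsuleDataSize 4096):

#include <stdint.h>
#include <stdlib.h>

/* One contiguous region of queue_depth * in_capsule_data_size bytes is
 * allocated up front; request i owns the slice at offset i * in_capsule_data_size. */
static void *
in_capsule_slice(void *bufs, uint32_t in_capsule_data_size, uint16_t i)
{
        return (void *)((uintptr_t)bufs + (uintptr_t)i * in_capsule_data_size);
}

/* With queue_depth = 128 and in_capsule_data_size = 4096, calloc(128, 4096)
 * reserves 512 KiB in total, and request 3's buffer begins at offset 12288. */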
@@ -404,7 +404,7 @@ nvmf_post_rdma_recv(struct spdk_nvmf_request *req)
nvmf_trace_ibv_sge(&sg_list[0]);
sg_list[1].addr = (uintptr_t)rdma_req->buf;
sg_list[1].length = DEFAULT_BB_SIZE;
sg_list[1].length = g_nvmf_tgt.in_capsule_data_size;
sg_list[1].lkey = rdma_conn->bufs_mr->lkey;
nvmf_trace_ibv_sge(&sg_list[1]);
@@ -1088,7 +1088,7 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
rdma_req->buf,
wc.byte_len - sizeof(struct spdk_nvmf_capsule_cmd),
rdma_req->buf,
DEFAULT_BB_SIZE);
g_nvmf_tgt.max_io_size);
if (rc < 0) {
SPDK_ERRLOG("prep_data failed\n");
return spdk_nvmf_request_complete(req);

View File

@@ -85,6 +85,8 @@ nvmf_init_nvme_session_properties(struct nvmf_session *session)
{
const struct spdk_nvme_ctrlr_data *cdata;
assert((g_nvmf_tgt.max_io_size % 4096) == 0);
/*
Here we are going to initialize the features, properties, and
identify controller details for the virtual controller associated
@@ -99,7 +101,7 @@ nvmf_init_nvme_session_properties(struct nvmf_session *session)
session->vcdata.cntlid = 0;
session->vcdata.kas = 10;
session->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
session->vcdata.mdts = SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE / 4096;
session->vcdata.mdts = nvmf_u32log2(g_nvmf_tgt.max_io_size / 4096);
session->vcdata.sgls.keyed_sgl = 1;
session->vcdata.sgls.sgl_offset = 1;
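
Working through the new MDTS value (assuming max_io_size is left at its 131072 default): MDTS is a power-of-two exponent in units of the 4 KiB minimum page size, which is why the log2 helper is needed here.

/* max_io_size / 4096 = 131072 / 4096 = 32, and nvmf_u32log2(32) = 5,
 * so mdts = 5, i.e. 2^5 * 4096 bytes = 131072 bytes, matching MaxIOSize.
 * The old expression stored 32 directly, which as an exponent would have
 * advertised a far larger transfer size than the target supports. */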
@@ -110,7 +112,7 @@ nvmf_init_nvme_session_properties(struct nvmf_session *session)
session->vcdata.nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
/* TODO: this should be set by the transport */
session->vcdata.nvmf_specific.ioccsz += SPDK_NVMF_MAX_RECV_DATA_TRANSFER_SIZE / 16;
session->vcdata.nvmf_specific.ioccsz += g_nvmf_tgt.max_io_size / 16;
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ctrlr data: maxcmd %x\n",
session->vcdata.maxcmd);
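
And a short worked example for the capsule size calculation above (assuming the 131072 MaxIOSize default; IOCCSZ is reported in 16-byte units):

/* max_io_size / 16 = 131072 / 16 = 8192, so 8192 additional 16-byte units
 * (128 KiB) are added to whatever ioccsz already accounts for the command
 * itself. As the TODO notes, this value should eventually come from the
 * transport rather than the global max_io_size. */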