nvmf: add a transport shared buffer count configuration option.
Previously, the transport data buffer pool was sized from the MaxQueueDepth setting. That is not a convenient knob for users to tune, so provide an explicit shared buffer count configuration option on the transport instead.
Change-Id: Ic6ff83076a65e77ec7376688ffb3737fd899057c
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/437450
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 3947bc2492
commit 58f1624497
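For orientation before the diff: the new knob is exposed through the `nvmf_create_transport` RPC. A minimal Python sketch of driving it is shown below; only the method name and the `num_shared_buffers` parameter come from this change, while the JSONRPCClient class and the socket path are assumptions about the surrounding SPDK rpc scripts.

```python
# Minimal sketch: create an NVMe-oF transport with an explicit shared buffer
# count over SPDK's JSON-RPC interface. JSONRPCClient and the socket path are
# assumptions; 'nvmf_create_transport' and 'num_shared_buffers' come from this change.
from rpc.client import JSONRPCClient  # from SPDK's scripts/rpc package (assumed layout)

client = JSONRPCClient('/var/tmp/spdk.sock')  # assumed default RPC socket
params = {
    'trtype': 'RDMA',            # transport type; required by the RPC
    'num_shared_buffers': 1024,  # number of pooled data buffers for the transport
}
client.call('nvmf_create_transport', params)
```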
@@ -42,6 +42,11 @@ prior to calling `spdk_nvmf_tgt_listen`.
 Related to the previous change, the rpc `set_nvmf_target_options` has been renamed to
 `set_nvmf_target_max_subsystems` to indicate that this is the only target option available for the user to edit.

+Add a field `num_shared_buffers` in struct spdk_nvmf_transport_opts,
+and also update the related rpc function nvmf_create_transport to make this
+configurable parameter available to users. The `num_shared_buffers` option is used to
+configure the number of shared data buffers for the RDMA or TCP transport.
+
 ### nvmf

 Add a new TCP/IP transport (located in lib/nvmf/tcp.c). With this tranport,
@@ -90,6 +90,10 @@
 # Set the maximum number of IO for admin queue
 #MaxAQDepth 32

+# Set the number of pooled data buffers available to the transport
+# It is used to provide the read/write data buffers for the qpairs on this transport.
+#NumSharedBuffers 512
+
 [Nvme]
 # NVMe Device Whitelist
 # Users may specify which NVMe devices to claim by their transport id.
@@ -70,6 +70,7 @@ struct spdk_nvmf_transport_opts {
 uint32_t max_io_size;
 uint32_t io_unit_size;
 uint32_t max_aq_depth;
+uint32_t num_shared_buffers;
 };

 /**
@@ -520,6 +520,10 @@ spdk_nvmf_parse_transport(struct spdk_nvmf_parse_transport_ctx *ctx)
 if (val >= 0) {
 opts.max_aq_depth = val;
 }
+val = spdk_conf_section_get_intval(ctx->sp, "NumSharedBuffers");
+if (val >= 0) {
+opts.num_shared_buffers = val;
+}

 transport = spdk_nvmf_transport_create(trtype, &opts);
 if (transport) {
@@ -1450,6 +1450,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
 "max_aq_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_aq_depth),
 spdk_json_decode_uint32, true
 },
+{
+"num_shared_buffers", offsetof(struct nvmf_rpc_create_transport_ctx, opts.num_shared_buffers),
+spdk_json_decode_uint32, true
+},
 };

 static void
@@ -1581,6 +1585,7 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
 spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
 spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
 spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
+spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);

 spdk_json_write_object_end(w);
 }
@@ -1524,18 +1524,20 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
 #define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE 4096
+#define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 512
 #define SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)

 static void
 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
 {
-opts->max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
-opts->max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
-opts->in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
-opts->max_io_size = SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
-opts->io_unit_size = spdk_max(SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
-SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
-opts->max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
+opts->max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
+opts->max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
+opts->in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
+opts->max_io_size = SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
+opts->io_unit_size = spdk_max(SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
+SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
+opts->max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
+opts->num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
 }

 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
@@ -1580,13 +1582,15 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
 " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
-" in_capsule_data_size=%d, max_aq_depth=%d\n",
+" in_capsule_data_size=%d, max_aq_depth=%d\n"
+" num_shared_buffers=%d\n",
 opts->max_queue_depth,
 opts->max_io_size,
 opts->max_qpairs_per_ctrlr,
 opts->io_unit_size,
 opts->in_capsule_data_size,
-opts->max_aq_depth);
+opts->max_aq_depth,
+opts->num_shared_buffers);

 /* I/O unit size cannot be larger than max I/O size */
 if (opts->io_unit_size > opts->max_io_size) {
@@ -1615,9 +1619,8 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 return NULL;
 }

-/* The maximum number of buffers we will need for a given request is equal to just less than double the number of SGL elements */
 rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
-opts->max_queue_depth * (SPDK_NVMF_MAX_SGL_ENTRIES * 2) * 4,
+opts->num_shared_buffers * (SPDK_NVMF_MAX_SGL_ENTRIES * 2),
 opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
 SPDK_ENV_SOCKET_ID_ANY);
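To make the effect of this knob concrete, here is a rough back-of-the-envelope sizing of the RDMA data buffer pool created above, written as a small Python sketch. The values of SPDK_NVMF_MAX_SGL_ENTRIES and NVMF_DATA_BUFFER_ALIGNMENT are not part of this diff; 16 and 4096 are assumptions used purely for illustration.

```python
# Rough sizing of the RDMA data buffer pool created by spdk_nvmf_rdma_create()
# above. Only num_shared_buffers, max_io_size and the pool formula come from
# this change; SGL_ENTRIES and ALIGNMENT below are assumed values.
SGL_ENTRIES = 16                 # assumed SPDK_NVMF_MAX_SGL_ENTRIES
ALIGNMENT = 4096                 # assumed NVMF_DATA_BUFFER_ALIGNMENT
num_shared_buffers = 512         # SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS
max_io_size = 131072             # SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE
io_unit_size = max(max_io_size // SGL_ENTRIES, 4096)  # default from opts_init above

elements = num_shared_buffers * (SGL_ENTRIES * 2)     # mempool element count
element_size = io_unit_size + ALIGNMENT               # bytes per pooled buffer
print(elements, element_size, elements * element_size // (1024 * 1024))
# With these assumed values: 16384 buffers of 12288 bytes, about 192 MiB.
```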
@@ -1767,10 +1770,10 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)

 if (rtransport->data_buf_pool != NULL) {
 if (spdk_mempool_count(rtransport->data_buf_pool) !=
-(transport->opts.max_queue_depth * (SPDK_NVMF_MAX_SGL_ENTRIES * 2) * 4)) {
+transport->opts.num_shared_buffers * (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
 SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
 spdk_mempool_count(rtransport->data_buf_pool),
-transport->opts.max_queue_depth * (SPDK_NVMF_MAX_SGL_ENTRIES * 2) * 4);
+transport->opts.num_shared_buffers * (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
 }
 }

@@ -547,13 +547,15 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
 SPDK_INFOLOG(SPDK_LOG_NVMF_TCP, "*** TCP Transport Init ***\n"
 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
 " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
-" in_capsule_data_size=%d, max_aq_depth=%d\n",
+" in_capsule_data_size=%d, max_aq_depth=%d\n"
+" num_shared_buffers=%d\n",
 opts->max_queue_depth,
 opts->max_io_size,
 opts->max_qpairs_per_ctrlr,
 opts->io_unit_size,
 opts->in_capsule_data_size,
-opts->max_aq_depth);
+opts->max_aq_depth,
+opts->num_shared_buffers);

 /* I/O unit size cannot be larger than max I/O size */
 if (opts->io_unit_size > opts->max_io_size) {
@@ -568,7 +570,7 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
 }

 ttransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_tcp_data",
-opts->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. */
+opts->num_shared_buffers,
 opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
 SPDK_ENV_SOCKET_ID_ANY);
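For TCP the relationship is simpler than for RDMA: the pool holds exactly `num_shared_buffers` elements, each sized for a full `max_io_size` request. A minimal sketch of the resulting footprint, again treating NVMF_DATA_BUFFER_ALIGNMENT (4096 here) as an assumed value:

```python
# Approximate TCP data buffer pool footprint with the defaults from this change.
num_shared_buffers = 512           # SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS
max_io_size = 131072               # SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE
ALIGNMENT = 4096                   # assumed NVMF_DATA_BUFFER_ALIGNMENT

print(num_shared_buffers * (max_io_size + ALIGNMENT) // (1024 * 1024))
# About 66 MiB with these assumed values.
```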
@@ -596,10 +598,10 @@ spdk_nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
 assert(transport != NULL);
 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);

-if (spdk_mempool_count(ttransport->data_buf_pool) != (transport->opts.max_queue_depth * 4)) {
+if (spdk_mempool_count(ttransport->data_buf_pool) != (transport->opts.num_shared_buffers)) {
 SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
 spdk_mempool_count(ttransport->data_buf_pool),
-transport->opts.max_queue_depth * 4);
+transport->opts.num_shared_buffers);
 }

 spdk_mempool_free(ttransport->data_buf_pool);
@@ -2850,16 +2852,18 @@ spdk_nvmf_tcp_qpair_set_sq_size(struct spdk_nvmf_qpair *qpair)
 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072
 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072
+#define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 512

 static void
 spdk_nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts)
 {
-opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH;
-opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR;
-opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE;
-opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE;
-opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE;
-opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH;
+opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH;
+opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR;
+opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE;
+opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE;
+opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE;
+opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH;
+opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS;
 }

 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
@@ -1279,7 +1279,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
 in_capsule_data_size=args.in_capsule_data_size,
 max_io_size=args.max_io_size,
 io_unit_size=args.io_unit_size,
-max_aq_depth=args.max_aq_depth)
+max_aq_depth=args.max_aq_depth,
+num_shared_buffers=args.num_shared_buffers)

 p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
 p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
@@ -1289,6 +1290,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
 p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
 p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
 p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
+p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
 p.set_defaults(func=nvmf_create_transport)

 def get_nvmf_transports(args):
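With the new flag wired into the parser above, the option can be exercised from the command line with something like `scripts/rpc.py nvmf_create_transport -t RDMA -n 1024`; the transport type and buffer count shown here are illustrative values, not defaults taken from this change.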
@@ -81,7 +81,8 @@ def nvmf_create_transport(client,
 in_capsule_data_size=None,
 max_io_size=None,
 io_unit_size=None,
-max_aq_depth=None):
+max_aq_depth=None,
+num_shared_buffers=None):
 """NVMf Transport Create options.

 Args:
|
||||
max_io_size: Maximum I/O data size in bytes (optional)
|
||||
io_unit_size: I/O unit size in bytes (optional)
|
||||
max_aq_depth: Max size admin quque per controller (optional)
|
||||
num_shared_buffers: The number of pooled data buffers available to the transport (optional)
|
||||
|
||||
Returns:
|
||||
True or False
|
||||
@@ -111,6 +113,8 @@ def nvmf_create_transport(client,
 params['io_unit_size'] = io_unit_size
 if max_aq_depth:
 params['max_aq_depth'] = max_aq_depth
+if num_shared_buffers:
+params['num_shared_buffers'] = num_shared_buffers
 return client.call('nvmf_create_transport', params)

@@ -48,6 +48,7 @@ struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
 .max_io_size = (SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
 .io_unit_size = SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
 .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
+.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
 };

 SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)