rpc: Add new parameter 'control_msg_num' to 'nvmf_create_transport'

This parameter sets the number of control messages allocated per poll
group and is specific to the TCP transport.
The new parameter must not be zero.
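
As a usage sketch (the socket path, import layout, and the value 64 are
assumptions for illustration, not part of this change), the option can be set
through SPDK's Python RPC client:

    # Hypothetical example: SPDK's scripts/rpc package on PYTHONPATH,
    # target listening on the default RPC socket.
    from rpc.client import JSONRPCClient
    from rpc.nvmf import nvmf_create_transport

    client = JSONRPCClient('/var/tmp/spdk.sock')
    # 64 is an arbitrary example; the default is 32 messages per poll group.
    nvmf_create_transport(client, trtype='TCP', control_msg_num=64)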

Change-Id: I8ae198c0b46e9a5850a80492aa6260f0c6ef885e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4829
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Author: Alexey Marchuk
Date: 2020-10-22 16:33:30 +03:00
Committed by: Tomasz Zawadzki
parent 85fa43241b
commit 4fe47d6ff4
4 changed files with 32 additions and 6 deletions

CHANGELOG.md

@@ -157,7 +157,8 @@ New optional parameters, `enable_placement_id` and `enable_quickack` were added
 A new RPC `bdev_examine_bdev` was added to allow users to examine a bdev explicitly.
 It can be used only if bdev_auto_examine is set to false by the RPC `bdev_set_options`.
-Add optional 'no_wr_batching' parameter to 'nvmf_create_transport' RPC method.
+New optional parameters `no_wr_batching` and `control_msg_num` were added to the RPC
+'nvmf_create_transport'.
 New RPCs, `iscsi_target_node_set_redirect` and `iscsi_target_node_request_logout`, have
 been added, and a new optional parameter `private` has been added to the RPC

lib/nvmf/tcp.c

@@ -51,6 +51,7 @@
 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
 #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16
 #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0
+#define SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM 32
 #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true
 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp;
@@ -281,6 +282,7 @@ struct spdk_nvmf_tcp_port {
 struct tcp_transport_opts {
     bool c2h_success;
+    uint16_t control_msg_num;
     uint32_t sock_priority;
 };
@@ -298,6 +300,10 @@ static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = {
         "c2h_success", offsetof(struct tcp_transport_opts, c2h_success),
         spdk_json_decode_bool, true
     },
+    {
+        "control_msg_num", offsetof(struct tcp_transport_opts, control_msg_num),
+        spdk_json_decode_uint16, true
+    },
     {
         "sock_priority", offsetof(struct tcp_transport_opts, sock_priority),
         spdk_json_decode_uint32, true
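
For illustration, the transport-specific JSON object this decoder accepts would
look like the following (values are hypothetical; every key is optional, since
each decoder entry sets its optional flag to true):

    {"c2h_success": true, "control_msg_num": 64, "sock_priority": 0}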
@@ -510,6 +516,7 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
     ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION;
     ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY;
+    ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
     if (opts->transport_specific != NULL &&
         spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder,
                                         SPDK_COUNTOF(tcp_transport_opts_decoder),
@@ -527,7 +534,7 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
                  " in_capsule_data_size=%d, max_aq_depth=%d\n"
                  " num_shared_buffers=%d, c2h_success=%d,\n"
                  " dif_insert_or_strip=%d, sock_priority=%d\n"
-                 " abort_timeout_sec=%d\n",
+                 " abort_timeout_sec=%d, control_msg_num=%hu\n",
                  opts->max_queue_depth,
                  opts->max_io_size,
                  opts->max_qpairs_per_ctrlr - 1,
@@ -538,7 +545,8 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
                  ttransport->tcp_opts.c2h_success,
                  opts->dif_insert_or_strip,
                  ttransport->tcp_opts.sock_priority,
-                 opts->abort_timeout_sec);
+                 opts->abort_timeout_sec,
+                 ttransport->tcp_opts.control_msg_num);
     if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) {
         SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n"
@@ -548,6 +556,13 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
         return NULL;
     }
+    if (ttransport->tcp_opts.control_msg_num == 0 &&
+        opts->in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
+        SPDK_WARNLOG("TCP param control_msg_num can't be 0 if ICD is less than %u bytes. Using default value %u\n",
+                     SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM);
+        ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
+    }
     /* I/O unit size cannot be larger than max I/O size */
     if (opts->io_unit_size > opts->max_io_size) {
         opts->io_unit_size = opts->max_io_size;
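
Read as pseudocode, the fallback rule above amounts to the following (a Python
paraphrase, not SPDK code; the 8192-byte value for
SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE is an assumption):

    ICD_MAX = 8192                # assumed SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE
    DEFAULT_CONTROL_MSG_NUM = 32  # SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM

    def effective_control_msg_num(control_msg_num, in_capsule_data_size):
        # Control messages are required whenever in-capsule data is too small
        # to carry admin/fabric commands, so zero is rejected in that case.
        if control_msg_num == 0 and in_capsule_data_size < ICD_MAX:
            return DEFAULT_CONTROL_MSG_NUM
        return control_msg_num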
@@ -1059,6 +1074,7 @@ nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list)
 static struct spdk_nvmf_transport_poll_group *
 nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
 {
+    struct spdk_nvmf_tcp_transport *ttransport;
     struct spdk_nvmf_tcp_poll_group *tgroup;
     tgroup = calloc(1, sizeof(*tgroup));
@@ -1074,11 +1090,13 @@ nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
     TAILQ_INIT(&tgroup->qpairs);
     TAILQ_INIT(&tgroup->await_req);
+    ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
     if (transport->opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
         SPDK_DEBUGLOG(nvmf_tcp, "ICD %u is less than min required for admin/fabric commands (%u). "
                       "Creating control messages list\n", transport->opts.in_capsule_data_size,
                       SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
-        tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(32);
+        tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(ttransport->tcp_opts.control_msg_num);
         if (!tgroup->control_msg_list) {
             goto cleanup;
         }
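
Since each poll group now sizes its control message list from the option, a
back-of-the-envelope footprint estimate (assuming each control message buffer
is SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE bytes, taken here as 8192) is:

    control_msg_num = 32   # the default per poll group
    buf_size = 8192        # assumed per-message buffer size
    print(control_msg_num * buf_size)  # 262144 bytes, i.e. 256 KiB per poll group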

scripts/rpc.py

@@ -1815,7 +1815,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
                                    sock_priority=args.sock_priority,
                                    acceptor_backlog=args.acceptor_backlog,
                                    abort_timeout_sec=args.abort_timeout_sec,
-                                   no_wr_batching=args.no_wr_batching)
+                                   no_wr_batching=args.no_wr_batching,
+                                   control_msg_num=args.control_msg_num)
     p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
     p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
@@ -1838,6 +1839,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-l', '--acceptor_backlog', help='Pending connections allowed at one time. Relevant only for RDMA transport', type=int)
     p.add_argument('-x', '--abort-timeout-sec', help='Abort execution timeout value, in seconds', type=int)
     p.add_argument('-w', '--no-wr-batching', action='store_true', help='Disable work requests batching. Relevant only for RDMA transport')
+    p.add_argument('-e', '--control_msg_num', help="""The number of control messages per poll group.
+    Relevant only for TCP transport""", type=int)
     p.set_defaults(func=nvmf_create_transport)

 def nvmf_get_transports(args):
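
As a usage sketch (the running target and the value 64 are assumptions), the
new flag can be passed on the command line as either -e 64 or
--control_msg_num 64:

    ./scripts/rpc.py nvmf_create_transport -t TCP -e 64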

scripts/rpc/nvmf.py

@@ -110,7 +110,8 @@ def nvmf_create_transport(client,
                           sock_priority=None,
                           acceptor_backlog=None,
                           abort_timeout_sec=None,
-                          no_wr_batching=None):
+                          no_wr_batching=None,
+                          control_msg_num=None):
     """NVMf Transport Create options.

     Args:
@@ -131,6 +132,7 @@ def nvmf_create_transport(client,
         acceptor_backlog: Pending connections allowed at one time - RDMA specific (optional)
         abort_timeout_sec: Abort execution timeout value, in seconds (optional)
         no_wr_batching: Boolean flag to disable work requests batching - RDMA specific (optional)
+        control_msg_num: The number of control messages per poll group - TCP specific (optional)

     Returns:
         True or False
     """
@@ -174,6 +176,8 @@ def nvmf_create_transport(client,
         params['abort_timeout_sec'] = abort_timeout_sec
     if no_wr_batching is not None:
         params['no_wr_batching'] = no_wr_batching
+    if control_msg_num is not None:
+        params['control_msg_num'] = control_msg_num

     return client.call('nvmf_create_transport', params)
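
For reference, the JSON-RPC request this produces would look like the following
(the id and the parameter values are illustrative):

    {
      "jsonrpc": "2.0",
      "method": "nvmf_create_transport",
      "id": 1,
      "params": {
        "trtype": "TCP",
        "control_msg_num": 64
      }
    }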