nvmf: Allow users to configure which lcore each subsystem runs on

Users can specify which core each subsystem and the acceptor listen routine
run on, allowing them to be placed on different cores for performance reasons.

Change-Id: I4bd1a96f39194c870863b4b778e6ea7cf8fc1a2d
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
Changpeng Liu 2016-08-16 14:22:51 +08:00 committed by Daniel Verkamp
parent 077fe1da65
commit 8a23223e1b
5 changed files with 35 additions and 11 deletions

View File

@ -39,8 +39,14 @@
# Set the maximum I/O size. Must be a multiple of 4096. # Set the maximum I/O size. Must be a multiple of 4096.
#MaxIOSize 131072 #MaxIOSize 131072
# Set the global acceptor lcore ID, lcores are numbered starting at 0.
#AcceptorCore 0
# Define an NVMf Subsystem. # Define an NVMf Subsystem.
# - NQN is required and must be unique. # - NQN is required and must be unique.
# - Core may be set or not. If set, the specified subsystem will run on # - Core may be set or not. If set, the specified subsystem will run on
# that core; otherwise a core is allocated to each subsystem in a # that core; otherwise a core is allocated to each subsystem in a
# round-robin fashion from the available cores. Lcores are numbered starting at 0. # round-robin fashion from the available cores. Lcores are numbered starting at 0.
# - Mode may be either "Direct" or "Virtual". Direct means that physical # - Mode may be either "Direct" or "Virtual". Direct means that physical
# devices attached to the target will be presented to hosts as if they # devices attached to the target will be presented to hosts as if they
# were directly attached to the host. No software emulation or command # were directly attached to the host. No software emulation or command
@ -59,6 +65,7 @@
# any PCI device. # any PCI device.
[Subsystem1] [Subsystem1]
NQN nqn.2016-06.io.spdk:cnode1 NQN nqn.2016-06.io.spdk:cnode1
Core 0
Mode Direct Mode Direct
Listen RDMA 15.15.15.2:4420 Listen RDMA 15.15.15.2:4420
Host nqn.2016-06.io.spdk:init Host nqn.2016-06.io.spdk:init
@ -67,6 +74,7 @@
# Multiple subsystems are allowed. # Multiple subsystems are allowed.
[Subsystem2] [Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2 NQN nqn.2016-06.io.spdk:cnode2
Core 0
Mode Direct Mode Direct
Listen RDMA 192.168.2.21:4420 Listen RDMA 192.168.2.21:4420
Host nqn.2016-06.io.spdk:init Host nqn.2016-06.io.spdk:init

View File

@ -100,6 +100,7 @@ spdk_nvmf_parse_nvmf_tgt(void)
int max_queues_per_sess; int max_queues_per_sess;
int in_capsule_data_size; int in_capsule_data_size;
int max_io_size; int max_io_size;
int acceptor_lcore;
int rc; int rc;
sp = spdk_conf_find_section(NULL, "Nvmf"); sp = spdk_conf_find_section(NULL, "Nvmf");
@ -142,7 +143,13 @@ spdk_nvmf_parse_nvmf_tgt(void)
max_io_size = nvmf_max(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN); max_io_size = nvmf_max(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN);
max_io_size = nvmf_min(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX); max_io_size = nvmf_min(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX);
rc = nvmf_tgt_init(max_queue_depth, max_queues_per_sess, in_capsule_data_size, max_io_size); acceptor_lcore = spdk_conf_section_get_intval(sp, "AcceptorCore");
if (acceptor_lcore < 0) {
acceptor_lcore = rte_lcore_id();
}
rc = nvmf_tgt_init(max_queue_depth, max_queues_per_sess, in_capsule_data_size, max_io_size,
acceptor_lcore);
if (rc != 0) { if (rc != 0) {
SPDK_ERRLOG("nvmf_tgt_init() failed\n"); SPDK_ERRLOG("nvmf_tgt_init() failed\n");
return rc; return rc;
@ -360,7 +367,7 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
struct spdk_nvmf_subsystem *subsystem; struct spdk_nvmf_subsystem *subsystem;
int i, ret; int i, ret;
uint64_t mask; uint64_t mask;
uint32_t lcore; int lcore = 0;
nqn = spdk_conf_section_get_val(sp, "NQN"); nqn = spdk_conf_section_get_val(sp, "NQN");
if (nqn == NULL) { if (nqn == NULL) {
@ -372,12 +379,15 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
return -1; return -1;
} }
/* Determine which core to assign to the subsystem using round robin */ /* Determine which core to assign to the subsystem */
mask = spdk_app_get_core_mask(); mask = spdk_app_get_core_mask();
lcore = 0; lcore = spdk_conf_section_get_intval(sp, "Core");
for (i = 0; i < sp->num; i++) { if (lcore < 0) {
lcore = spdk_nvmf_allocate_lcore(mask, lcore); lcore = 0;
lcore++; for (i = 0; i < sp->num; i++) {
lcore = spdk_nvmf_allocate_lcore(mask, lcore);
lcore++;
}
} }
lcore = spdk_nvmf_allocate_lcore(mask, lcore); lcore = spdk_nvmf_allocate_lcore(mask, lcore);

View File

@ -117,7 +117,8 @@ spdk_nvmf_check_pools(void)
int int
nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess, nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
uint32_t in_capsule_data_size, uint32_t max_io_size) uint32_t in_capsule_data_size, uint32_t max_io_size,
uint32_t acceptor_lcore)
{ {
int rc; int rc;
@ -125,11 +126,13 @@ nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
g_nvmf_tgt.max_queue_depth = max_queue_depth; g_nvmf_tgt.max_queue_depth = max_queue_depth;
g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size; g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
g_nvmf_tgt.max_io_size = max_io_size; g_nvmf_tgt.max_io_size = max_io_size;
g_nvmf_tgt.acceptor_lcore = acceptor_lcore;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess); SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth); SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size); SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size); SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "NVMf Acceptor lcore: %d \n", acceptor_lcore);
/* init nvmf specific config options */ /* init nvmf specific config options */
if (!g_nvmf_tgt.sin_port) { if (!g_nvmf_tgt.sin_port) {

View File

@ -69,11 +69,14 @@ struct spdk_nvmf_globals {
uint32_t in_capsule_data_size; uint32_t in_capsule_data_size;
uint32_t max_io_size; uint32_t max_io_size;
uint32_t acceptor_lcore;
uint16_t sin_port; uint16_t sin_port;
}; };
int nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_conn_per_sess, int nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_conn_per_sess,
uint32_t in_capsule_data_size, uint32_t max_io_size); uint32_t in_capsule_data_size, uint32_t max_io_size,
uint32_t acceptor_lcore);
static inline uint32_t static inline uint32_t
nvmf_u32log2(uint32_t x) nvmf_u32log2(uint32_t x)

View File

@ -1018,8 +1018,8 @@ spdk_nvmf_rdma_acceptor_start(void)
sin_port = ntohs(rdma_get_src_port(g_rdma.acceptor_listen_id)); sin_port = ntohs(rdma_get_src_port(g_rdma.acceptor_listen_id));
SPDK_NOTICELOG("*** NVMf Target Listening on port %d ***\n", sin_port); SPDK_NOTICELOG("*** NVMf Target Listening on port %d ***\n", sin_port);
spdk_poller_register(&g_rdma.acceptor_poller, nvmf_rdma_accept, NULL, rte_lcore_id(), NULL, spdk_poller_register(&g_rdma.acceptor_poller, nvmf_rdma_accept, NULL, g_nvmf_tgt.acceptor_lcore,
ACCEPT_TIMEOUT_US); NULL, ACCEPT_TIMEOUT_US);
return rc; return rc;
listen_error: listen_error: