nvmf_tgt: Remove AcceptorCore config parameter

Historically, polling for new connections was costly.
Now it is very inexpensive, so there is no longer a
reason to let users choose which core the acceptor
runs on. Simplify initialization and configuration by
removing the AcceptorCore parameter.

Change-Id: I1cc4c321bb5986289bd48860cb270b0b552e3baa
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/387681
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Ben Walker
Date:   2017-11-14 13:51:39 -07:00
Commit: bfd55056b0
Parent: 8cd7252f04

4 changed files with 3 additions and 22 deletions


@@ -118,7 +118,7 @@ spdk_add_nvmf_discovery_subsystem(void)
 	struct nvmf_tgt_subsystem *app_subsys;
 
 	app_subsys = nvmf_tgt_create_subsystem(SPDK_NVMF_DISCOVERY_NQN, SPDK_NVMF_SUBTYPE_DISCOVERY, 0,
-					       g_spdk_nvmf_tgt_conf.acceptor_lcore);
+					       spdk_env_get_current_core());
 	if (app_subsys == NULL) {
 		SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
 		return -1;
@@ -138,7 +138,6 @@ spdk_nvmf_read_config_file_params(struct spdk_conf_section *sp,
 	int max_queues_per_sess;
 	int in_capsule_data_size;
 	int max_io_size;
-	int acceptor_lcore;
 	int acceptor_poll_rate;
 
 	max_queue_depth = spdk_conf_section_get_intval(sp, "MaxQueueDepth");
@@ -161,11 +160,6 @@ spdk_nvmf_read_config_file_params(struct spdk_conf_section *sp,
 		opts->max_io_size = max_io_size;
 	}
 
-	acceptor_lcore = spdk_conf_section_get_intval(sp, "AcceptorCore");
-	if (acceptor_lcore >= 0) {
-		g_spdk_nvmf_tgt_conf.acceptor_lcore = acceptor_lcore;
-	}
-
 	acceptor_poll_rate = spdk_conf_section_get_intval(sp, "AcceptorPollRate");
 	if (acceptor_poll_rate >= 0) {
 		g_spdk_nvmf_tgt_conf.acceptor_poll_rate = acceptor_poll_rate;
@@ -180,7 +174,6 @@ spdk_nvmf_parse_nvmf_tgt(void)
 	int rc;
 
 	spdk_nvmf_tgt_opts_init(&opts);
-	g_spdk_nvmf_tgt_conf.acceptor_lcore = spdk_env_get_current_core();
 	g_spdk_nvmf_tgt_conf.acceptor_poll_rate = ACCEPT_TIMEOUT_US;
 
 	sp = spdk_conf_find_section(NULL, "Nvmf");
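
For reference, the surviving option is read with spdk_conf_section_get_intval(), which returns a negative value when the key is absent; in that case the compiled-in default assigned in spdk_nvmf_parse_nvmf_tgt() is kept. A minimal sketch of that pattern, based on the hunks above (the wrapper read_acceptor_poll_rate() and the literal default value are illustrative, not SPDK code):

#include <stdint.h>

#include "spdk/conf.h"

/* Illustrative stand-in for the app's ACCEPT_TIMEOUT_US default. */
#define DEFAULT_ACCEPTOR_POLL_RATE_US 10000

/* Read AcceptorPollRate from the [Nvmf] section of an already-loaded
 * config file, keeping the default when the key is missing. */
static uint32_t
read_acceptor_poll_rate(void)
{
	struct spdk_conf_section *sp;
	int acceptor_poll_rate;
	uint32_t poll_rate = DEFAULT_ACCEPTOR_POLL_RATE_US;

	sp = spdk_conf_find_section(NULL, "Nvmf");
	if (sp == NULL) {
		return poll_rate;
	}

	acceptor_poll_rate = spdk_conf_section_get_intval(sp, "AcceptorPollRate");
	if (acceptor_poll_rate >= 0) {
		poll_rate = (uint32_t)acceptor_poll_rate;
	}

	return poll_rate;
}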


@@ -332,13 +332,6 @@ nvmf_tgt_advance_state(void *arg1, void *arg2)
 			rc = -EINVAL;
 			break;
 		}
-
-		if (((1ULL << g_spdk_nvmf_tgt_conf.acceptor_lcore) & spdk_app_get_core_mask()) == 0) {
-			SPDK_ERRLOG("Invalid AcceptorCore setting\n");
-			g_tgt.state = NVMF_TGT_ERROR;
-			rc = -EINVAL;
-			break;
-		}
 		g_tgt.state = NVMF_TGT_INIT_CREATE_POLL_GROUP;
 		break;
 	case NVMF_TGT_INIT_CREATE_POLL_GROUP: {
@@ -363,10 +356,9 @@ nvmf_tgt_advance_state(void *arg1, void *arg2)
 	}
 	case NVMF_TGT_INIT_START_ACCEPTOR:
 		spdk_poller_register(&g_acceptor_poller, acceptor_poll, g_tgt.tgt,
-				     g_spdk_nvmf_tgt_conf.acceptor_lcore,
+				     spdk_env_get_current_core(),
				     g_spdk_nvmf_tgt_conf.acceptor_poll_rate);
-		SPDK_NOTICELOG("Acceptor running on core %u on socket %u\n", g_spdk_nvmf_tgt_conf.acceptor_lcore,
-			       spdk_env_get_socket_id(g_spdk_nvmf_tgt_conf.acceptor_lcore));
+		SPDK_NOTICELOG("Acceptor running\n");
 		g_tgt.state = NVMF_TGT_RUNNING;
 		break;
 	case NVMF_TGT_RUNNING:
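
The registration above is what actually removes the need for the option: the acceptor poller is always created on the core executing target initialization. A hedged sketch of the same call pattern, using the spdk_poller_register() signature exactly as it appears in this diff (poller pointer, callback, argument, lcore, period in microseconds); the header names reflect the 2017-era event framework, and start_acceptor() plus the empty callback body are illustrative stand-ins, not the real nvmf_tgt code:

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/nvmf.h"

static struct spdk_poller *g_acceptor_poller;

/* Stand-in for the real acceptor callback: this is where the target
 * would be asked to check its transports for new connections. */
static void
acceptor_poll(void *arg)
{
	struct spdk_nvmf_tgt *tgt = arg;

	(void)tgt;
	/* ... poll transports for incoming connections ... */
}

/* Register the acceptor on whatever core runs this function; with
 * AcceptorCore gone there is no way to pick a different one. */
static void
start_acceptor(struct spdk_nvmf_tgt *tgt, uint64_t poll_rate_us)
{
	spdk_poller_register(&g_acceptor_poller, acceptor_poll, tgt,
			     spdk_env_get_current_core(), poll_rate_us);
}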


@@ -48,7 +48,6 @@ struct rpc_listen_address {
 };
 
 struct spdk_nvmf_tgt_conf {
-	uint32_t acceptor_lcore;
 	uint32_t acceptor_poll_rate;
 };
 


@@ -59,9 +59,6 @@
 	# Set the maximum I/O size. Must be a multiple of 4096.
 	#MaxIOSize 131072
 
-	# Set the global acceptor lcore ID, lcores are numbered starting at 0.
-	#AcceptorCore 0
-
 	# Set how often the acceptor polls for incoming connections. The acceptor is also
 	# responsible for polling existing connections that have gone idle. 0 means continuously
 	# poll. Units in microseconds.
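
On the config file side, an existing [Nvmf] section simply loses one knob; AcceptorPollRate keeps working as documented above, and a leftover AcceptorCore line is now silently ignored because nothing reads it anymore. An illustrative snippet (the values are examples only):

[Nvmf]
  # Set the maximum I/O size. Must be a multiple of 4096.
  MaxIOSize 131072

  # Poll for incoming connections every 10000 microseconds.
  AcceptorPollRate 10000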