nvmf_tgt: Rewrite initialization as a state machine

This is about to become a hot mess of asynchronous operations,
so introduce a state machine to make it easier to follow
the logic.

Change-Id: If67acfd61d4e70e30a95245d5f46112323943459
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/385482
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ben Walker, 2017-11-01 16:24:18 -07:00
parent f7476d444b
commit db3edeb341
3 changed files with 188 additions and 126 deletions
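The core of the change is a single driver loop: nvmf_tgt_advance_state() keeps advancing through states until some state starts an asynchronous operation without transitioning, and that operation's completion callback re-enters the driver. The following is a minimal, self-contained C sketch of that pattern; the demo_* names are illustrative stand-ins, not SPDK APIs:

#include <stdio.h>

/* Illustrative states only; the real commit uses NVMF_TGT_INIT_*, etc. */
enum demo_state {
	DEMO_INIT,
	DEMO_WAIT_ASYNC,	/* parked here until a completion re-enters the driver */
	DEMO_RUNNING,
	DEMO_STOPPED,
};

static enum demo_state g_state = DEMO_INIT;

static void demo_advance(void);

/* Stands in for an event-framework completion (e.g. a poller-unregister
 * callback): it moves the machine forward and re-enters the driver. */
static void
async_done(void)
{
	g_state = DEMO_RUNNING;
	demo_advance();
}

static void
demo_advance(void)
{
	enum demo_state prev_state;

	/* Keep advancing as long as a transition happens synchronously. A state
	 * that starts an async operation leaves g_state unchanged, so the loop
	 * exits and the completion callback resumes the machine later. */
	do {
		prev_state = g_state;

		switch (g_state) {
		case DEMO_INIT:
			printf("init: allocate resources\n");
			g_state = DEMO_WAIT_ASYNC;
			break;
		case DEMO_WAIT_ASYNC:
			printf("waiting: async operation in flight\n");
			/* No transition here; async_done() will advance us. */
			break;
		case DEMO_RUNNING:
			printf("running\n");
			g_state = DEMO_STOPPED;
			break;
		case DEMO_STOPPED:
			printf("stopped\n");
			return;
		}
	} while (g_state != prev_state);
}

int
main(void)
{
	demo_advance();	/* runs DEMO_INIT, then parks in DEMO_WAIT_ASYNC */
	async_done();	/* completion fires: DEMO_RUNNING -> DEMO_STOPPED */
	return 0;
}

In the diff below this shows up as, for example, NVMF_TGT_FINI_STOP_ACCEPTOR registering an event and breaking without changing g_tgt.state; the loop exits, and acceptor_poller_unregistered_event later sets NVMF_TGT_FINI_STOP_POLLER and calls nvmf_tgt_advance_state(NULL, NULL) to resume.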


@@ -188,8 +188,8 @@ spdk_nvmf_parse_nvmf_tgt(void)
 		spdk_nvmf_read_config_file_params(sp, &opts);
 	}
 
-	g_tgt = spdk_nvmf_tgt_create(&opts);
-	if (!g_tgt) {
+	g_tgt.tgt = spdk_nvmf_tgt_create(&opts);
+	if (!g_tgt.tgt) {
 		SPDK_ERRLOG("spdk_nvmf_tgt_create() failed\n");
 		return -1;
 	}
@@ -455,7 +455,7 @@ spdk_nvmf_construct_subsystem(const char *name, int32_t lcore,
 		snprintf(trid.traddr, sizeof(trid.traddr), "%s", addresses[i].traddr);
 		snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", addresses[i].trsvcid);
 
-		rc = spdk_nvmf_tgt_listen(g_tgt, &trid);
+		rc = spdk_nvmf_tgt_listen(g_tgt.tgt, &trid);
 		if (rc) {
 			SPDK_ERRLOG("Failed to listen on transport %s, adrfam %s, traddr %s, trsvcid %s\n",
 				    addresses[i].transport,


@@ -46,7 +46,7 @@ struct nvmf_tgt_poll_group {
 	struct spdk_poller *poller;
 };
 
-struct spdk_nvmf_tgt *g_tgt = NULL;
+struct nvmf_tgt g_tgt = {};
 
 static struct nvmf_tgt_poll_group *g_poll_groups = NULL;
 static size_t g_num_poll_groups = 0;
@@ -57,11 +57,7 @@ static struct spdk_poller *g_acceptor_poller = NULL;
 static TAILQ_HEAD(, nvmf_tgt_subsystem) g_subsystems = TAILQ_HEAD_INITIALIZER(g_subsystems);
 static bool g_subsystems_shutdown;
 
-static void
-shutdown_complete(void)
-{
-	spdk_app_stop(0);
-}
+static void nvmf_tgt_advance_state(void *arg1, void *arg2);
 
 static void
 subsystem_delete_event(void *arg1, void *arg2)
@@ -75,9 +71,8 @@ subsystem_delete_event(void *arg1, void *arg2)
 	spdk_nvmf_delete_subsystem(subsystem);
 
 	if (g_subsystems_shutdown && TAILQ_EMPTY(&g_subsystems)) {
-		spdk_nvmf_tgt_destroy(g_tgt);
-		shutdown_complete();
+		/* Finished shutting down all subsystems - continue the shutdown process. */
+		g_tgt.state = NVMF_TGT_FINI_FREE_RESOURCES;
+		nvmf_tgt_advance_state(NULL, NULL);
 	}
 }
@@ -95,66 +90,29 @@ nvmf_tgt_delete_subsystem(struct nvmf_tgt_subsystem *app_subsys)
 	spdk_poller_unregister(&app_subsys->poller, event);
 }
 
-static void
-shutdown_subsystems(void)
-{
-	struct nvmf_tgt_subsystem *app_subsys, *tmp;
-
-	g_subsystems_shutdown = true;
-	TAILQ_FOREACH_SAFE(app_subsys, &g_subsystems, tailq, tmp) {
-		nvmf_tgt_delete_subsystem(app_subsys);
-	}
-}
-
 static void
 nvmf_tgt_poll_group_stopped_event(void *arg1, void *arg2)
 {
-	uint32_t core;
-
-	g_active_poll_groups--;
-
-	if (g_active_poll_groups == 0) {
-		/* All of the poll group pollers are stopped, so we can now delete the poll groups safely. */
-		SPDK_ENV_FOREACH_CORE(core) {
-			struct nvmf_tgt_poll_group *app_poll_group = &g_poll_groups[core];
-
-			spdk_nvmf_poll_group_destroy(app_poll_group->group);
-		}
-
-		shutdown_subsystems();
-	}
+	g_tgt.state = NVMF_TGT_FINI_DESTROY_POLL_GROUP;
+	nvmf_tgt_advance_state(NULL, NULL);
 }
 
 static void
 acceptor_poller_unregistered_event(void *arg1, void *arg2)
 {
-	struct nvmf_tgt_poll_group *app_poll_group;
-	struct spdk_event *event;
-	uint32_t core;
-
-	/* Stop poll group pollers on all cores */
-	SPDK_ENV_FOREACH_CORE(core) {
-		app_poll_group = &g_poll_groups[core];
-		event = spdk_event_allocate(spdk_env_get_current_core(),
-					    nvmf_tgt_poll_group_stopped_event,
-					    NULL, NULL);
-		spdk_poller_unregister(&app_poll_group->poller, event);
-	}
+	g_tgt.state = NVMF_TGT_FINI_STOP_POLLER;
+	nvmf_tgt_advance_state(NULL, NULL);
 }
 
 static void
 spdk_nvmf_shutdown_cb(void)
 {
-	struct spdk_event *event;
-
 	fprintf(stdout, "\n=========================\n");
 	fprintf(stdout, "   NVMF shutdown signal\n");
 	fprintf(stdout, "=========================\n");
 
-	event = spdk_event_allocate(spdk_env_get_current_core(), acceptor_poller_unregistered_event,
-				    NULL, NULL);
-	spdk_poller_unregister(&g_acceptor_poller, event);
+	g_tgt.state = NVMF_TGT_FINI_STOP_ACCEPTOR;
+	nvmf_tgt_advance_state(NULL, NULL);
 }
 
 static void
@@ -194,7 +152,7 @@ nvmf_tgt_create_subsystem(const char *name, enum spdk_nvmf_subtype subtype, uint
 	struct spdk_nvmf_subsystem *subsystem;
 	struct nvmf_tgt_subsystem *app_subsys;
 
-	if (spdk_nvmf_tgt_find_subsystem(g_tgt, name)) {
+	if (spdk_nvmf_tgt_find_subsystem(g_tgt.tgt, name)) {
 		SPDK_ERRLOG("Subsystem already exist\n");
 		return NULL;
 	}
@@ -205,7 +163,7 @@ nvmf_tgt_create_subsystem(const char *name, enum spdk_nvmf_subtype subtype, uint
 		return NULL;
 	}
 
-	subsystem = spdk_nvmf_create_subsystem(g_tgt, name, subtype, num_ns);
+	subsystem = spdk_nvmf_create_subsystem(g_tgt.tgt, name, subtype, num_ns);
 	if (subsystem == NULL) {
 		SPDK_ERRLOG("Subsystem creation failed\n");
 		free(app_subsys);
@@ -223,21 +181,6 @@ nvmf_tgt_create_subsystem(const char *name, enum spdk_nvmf_subtype subtype, uint
 	return app_subsys;
 }
 
-/* This function can only be used before the pollers are started. */
-static void
-nvmf_tgt_delete_subsystems(void)
-{
-	struct nvmf_tgt_subsystem *app_subsys, *tmp;
-	struct spdk_nvmf_subsystem *subsystem;
-
-	TAILQ_FOREACH_SAFE(app_subsys, &g_subsystems, tailq, tmp) {
-		TAILQ_REMOVE(&g_subsystems, app_subsys, tailq);
-		subsystem = app_subsys->subsystem;
-
-		spdk_nvmf_delete_subsystem(subsystem);
-
-		free(app_subsys);
-	}
-}
-
 struct nvmf_tgt_subsystem *
 nvmf_tgt_subsystem_first(void)
 {
@@ -281,68 +224,161 @@ nvmf_tgt_poll_group_poll(void *arg)
 }
 
 static void
-spdk_nvmf_startup(void *arg1, void *arg2)
+nvmf_tgt_advance_state(void *arg1, void *arg2)
 {
-	uint32_t core;
-	int rc;
-
-	rc = spdk_nvmf_parse_conf();
-	if (rc < 0) {
-		SPDK_ERRLOG("spdk_nvmf_parse_conf() failed\n");
-		goto initialize_error;
-	}
-
-	if (((1ULL << g_spdk_nvmf_tgt_conf.acceptor_lcore) & spdk_app_get_core_mask()) == 0) {
-		SPDK_ERRLOG("Invalid AcceptorCore setting\n");
-		goto initialize_error;
-	}
-
-	/* Find the maximum core number */
-	SPDK_ENV_FOREACH_CORE(core) {
-		g_num_poll_groups = spdk_max(g_num_poll_groups, core + 1);
-	}
-	assert(g_num_poll_groups > 0);
-
-	g_poll_groups = calloc(g_num_poll_groups, sizeof(*g_poll_groups));
-	if (g_poll_groups == NULL) {
-		goto initialize_error;
-	}
-
-	/* Create a poll group on each core in the app core mask. */
-	g_active_poll_groups = 0;
-	SPDK_ENV_FOREACH_CORE(core) {
-		struct nvmf_tgt_poll_group *app_poll_group = &g_poll_groups[core];
-
-		app_poll_group->group = spdk_nvmf_poll_group_create(g_tgt);
-		if (app_poll_group->group == NULL) {
-			SPDK_ERRLOG("Failed to create poll group for core %u\n", core);
-			goto initialize_error;
-		}
-
-		spdk_poller_register(&app_poll_group->poller,
-				     nvmf_tgt_poll_group_poll, app_poll_group,
-				     core, 0);
-		g_active_poll_groups++;
-	}
-
-	spdk_poller_register(&g_acceptor_poller, acceptor_poll, g_tgt,
-			     g_spdk_nvmf_tgt_conf.acceptor_lcore,
-			     g_spdk_nvmf_tgt_conf.acceptor_poll_rate);
-
-	SPDK_NOTICELOG("Acceptor running on core %u on socket %u\n", g_spdk_nvmf_tgt_conf.acceptor_lcore,
-		       spdk_env_get_socket_id(g_spdk_nvmf_tgt_conf.acceptor_lcore));
-
-	if (getenv("MEMZONE_DUMP") != NULL) {
-		spdk_memzone_dump(stdout);
-		fflush(stdout);
-	}
-
-	return;
-
-initialize_error:
-	nvmf_tgt_delete_subsystems();
-	spdk_app_stop(rc);
+	enum nvmf_tgt_state prev_state;
+	int rc = 0;
+
+	do {
+		prev_state = g_tgt.state;
+
+		switch (g_tgt.state) {
+		case NVMF_TGT_INIT_NONE: {
+			uint32_t core;
+
+			g_tgt.state = NVMF_TGT_INIT_PARSE_CONFIG;
+			g_tgt.core = spdk_env_get_first_core();
+
+			/* Find the maximum core number */
+			SPDK_ENV_FOREACH_CORE(core) {
+				g_num_poll_groups = spdk_max(g_num_poll_groups, core + 1);
+			}
+			assert(g_num_poll_groups > 0);
+
+			g_poll_groups = calloc(g_num_poll_groups, sizeof(*g_poll_groups));
+			if (g_poll_groups == NULL) {
+				g_tgt.state = NVMF_TGT_ERROR;
+				rc = -ENOMEM;
+				break;
+			}
+			break;
+		}
+		case NVMF_TGT_INIT_PARSE_CONFIG:
+			rc = spdk_nvmf_parse_conf();
+			if (rc < 0) {
+				SPDK_ERRLOG("spdk_nvmf_parse_conf() failed\n");
+				g_tgt.state = NVMF_TGT_ERROR;
+				rc = -EINVAL;
+				break;
+			}
+
+			if (((1ULL << g_spdk_nvmf_tgt_conf.acceptor_lcore) & spdk_app_get_core_mask()) == 0) {
+				SPDK_ERRLOG("Invalid AcceptorCore setting\n");
+				g_tgt.state = NVMF_TGT_ERROR;
+				rc = -EINVAL;
+				break;
+			}
+			g_tgt.state = NVMF_TGT_INIT_CREATE_POLL_GROUP;
+			break;
+		case NVMF_TGT_INIT_CREATE_POLL_GROUP: {
+			struct nvmf_tgt_poll_group *pg;
+
+			pg = &g_poll_groups[g_tgt.core];
+			assert(pg != NULL);
+
+			pg->group = spdk_nvmf_poll_group_create(g_tgt.tgt);
+			if (pg->group == NULL) {
+				SPDK_ERRLOG("Failed to create poll group for core %u\n", g_tgt.core);
+				rc = -ENOMEM;
+				g_tgt.state = NVMF_TGT_ERROR;
+				break;
+			}
+			g_tgt.state = NVMF_TGT_INIT_START_POLLER;
+			break;
+		}
+		case NVMF_TGT_INIT_START_POLLER: {
+			struct nvmf_tgt_poll_group *pg;
+
+			pg = &g_poll_groups[g_tgt.core];
+			assert(pg != NULL);
+
+			spdk_poller_register(&pg->poller,
+					     nvmf_tgt_poll_group_poll, pg,
+					     g_tgt.core, 0);
+			g_active_poll_groups++;
+
+			g_tgt.core = spdk_env_get_next_core(g_tgt.core);
+			if (g_tgt.core != UINT32_MAX) {
+				g_tgt.state = NVMF_TGT_INIT_CREATE_POLL_GROUP;
+			} else {
+				g_tgt.state = NVMF_TGT_INIT_START_ACCEPTOR;
+			}
+			break;
+		}
+		case NVMF_TGT_INIT_START_ACCEPTOR:
+			spdk_poller_register(&g_acceptor_poller, acceptor_poll, g_tgt.tgt,
+					     g_spdk_nvmf_tgt_conf.acceptor_lcore,
+					     g_spdk_nvmf_tgt_conf.acceptor_poll_rate);
+			SPDK_NOTICELOG("Acceptor running on core %u on socket %u\n", g_spdk_nvmf_tgt_conf.acceptor_lcore,
+				       spdk_env_get_socket_id(g_spdk_nvmf_tgt_conf.acceptor_lcore));
+			g_tgt.state = NVMF_TGT_RUNNING;
+			break;
+		case NVMF_TGT_RUNNING:
+			if (getenv("MEMZONE_DUMP") != NULL) {
+				spdk_memzone_dump(stdout);
+				fflush(stdout);
+			}
+			g_tgt.core = spdk_env_get_first_core();
+			break;
+		case NVMF_TGT_FINI_STOP_ACCEPTOR: {
+			struct spdk_event *event;
+
+			event = spdk_event_allocate(spdk_env_get_current_core(), acceptor_poller_unregistered_event,
+						    NULL, NULL);
+			spdk_poller_unregister(&g_acceptor_poller, event);
+			break;
+		}
+		case NVMF_TGT_FINI_STOP_POLLER: {
+			struct spdk_event *event;
+			struct nvmf_tgt_poll_group *pg;
+
+			pg = &g_poll_groups[g_tgt.core];
+			assert(pg != NULL);
+
+			event = spdk_event_allocate(spdk_env_get_current_core(), nvmf_tgt_poll_group_stopped_event,
+						    NULL, NULL);
+			spdk_poller_unregister(&pg->poller, event);
+			break;
+		}
+		case NVMF_TGT_FINI_DESTROY_POLL_GROUP: {
+			struct nvmf_tgt_poll_group *pg;
+
+			pg = &g_poll_groups[g_tgt.core];
+			assert(pg != NULL);
+
+			spdk_nvmf_poll_group_destroy(pg->group);
+			assert(g_active_poll_groups > 0);
+			g_active_poll_groups--;
+
+			g_tgt.core = spdk_env_get_next_core(g_tgt.core);
+			if (g_tgt.core != UINT32_MAX) {
+				g_tgt.state = NVMF_TGT_FINI_STOP_POLLER;
+			} else {
+				assert(g_active_poll_groups == 0);
+				g_tgt.state = NVMF_TGT_FINI_SHUTDOWN_SUBSYSTEMS;
+			}
+			break;
+		}
+		case NVMF_TGT_FINI_SHUTDOWN_SUBSYSTEMS: {
+			struct nvmf_tgt_subsystem *app_subsys, *tmp;
+
+			g_subsystems_shutdown = true;
+			TAILQ_FOREACH_SAFE(app_subsys, &g_subsystems, tailq, tmp) {
+				nvmf_tgt_delete_subsystem(app_subsys);
+			}
+			break;
+		}
+		case NVMF_TGT_FINI_FREE_RESOURCES:
+			spdk_nvmf_tgt_destroy(g_tgt.tgt);
+			g_tgt.state = NVMF_TGT_STOPPED;
+			break;
+		case NVMF_TGT_STOPPED:
+			spdk_app_stop(0);
+			return;
+		case NVMF_TGT_ERROR:
+			spdk_app_stop(rc);
+			return;
+		}
+	} while (g_tgt.state != prev_state);
 }
 
 int
@@ -353,7 +389,7 @@ spdk_nvmf_tgt_start(struct spdk_app_opts *opts)
 	opts->shutdown_cb = spdk_nvmf_shutdown_cb;
 
 	/* Blocks until the application is exiting */
-	rc = spdk_app_start(opts, spdk_nvmf_startup, NULL, NULL);
+	rc = spdk_app_start(opts, nvmf_tgt_advance_state, NULL, NULL);
 
 	spdk_app_fini();


@@ -61,9 +61,35 @@ struct nvmf_tgt_subsystem {
 	uint32_t lcore;
 };
 
+enum nvmf_tgt_state {
+	NVMF_TGT_INIT_NONE = 0,
+	NVMF_TGT_INIT_PARSE_CONFIG,
+	NVMF_TGT_INIT_CREATE_POLL_GROUP,
+	NVMF_TGT_INIT_START_POLLER,
+	NVMF_TGT_INIT_START_ACCEPTOR,
+	NVMF_TGT_RUNNING,
+	NVMF_TGT_FINI_STOP_ACCEPTOR,
+	NVMF_TGT_FINI_STOP_POLLER,
+	NVMF_TGT_FINI_DESTROY_POLL_GROUP,
+	NVMF_TGT_FINI_SHUTDOWN_SUBSYSTEMS,
+	NVMF_TGT_FINI_FREE_RESOURCES,
+	NVMF_TGT_STOPPED,
+	NVMF_TGT_ERROR,
+};
+
+struct nvmf_tgt {
+	enum nvmf_tgt_state state;
+
+	struct spdk_nvmf_tgt *tgt;
+
+	// Used at initialization only
+	uint32_t core; // The current core when allocating pollers
+};
+
 extern struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf;
 
-extern struct spdk_nvmf_tgt *g_tgt;
+extern struct nvmf_tgt g_tgt;
 
 struct nvmf_tgt_subsystem *
 nvmf_tgt_subsystem_first(void);