nvmf_example: create the poll groups

Create the poll groups of the nvmf target, one per lightweight thread (i.e. per core), and tear them down during shutdown.

Change-Id: I56a7add994927245ef5984574fa6230821276b7f
Signed-off-by: JinYu <jin.yu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468658
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: GangCao <gang.cao@intel.com>
Author: JinYu <jin.yu@intel.com>, 2019-08-26 17:21:54 +08:00
Committed-by: Tomasz Zawadzki
Commit: 8d601b077f (parent: b63e47f237)

@@ -49,6 +49,8 @@ static const char *g_rpc_addr = SPDK_DEFAULT_RPC_ADDR;
 enum nvmf_target_state {
 	NVMF_INIT_SUBSYSTEM = 0,
 	NVMF_INIT_TARGET,
+	NVMF_INIT_POLL_GROUPS,
+	NVMF_FINI_POLL_GROUPS,
 	NVMF_FINI_TARGET,
 	NVMF_FINI_SUBSYSTEM,
 };
@@ -65,6 +67,12 @@ struct nvmf_reactor {
 	TAILQ_ENTRY(nvmf_reactor) link;
 };
 
+struct nvmf_target_poll_group {
+	struct spdk_nvmf_poll_group	*group;
+	struct spdk_thread		*thread;
+	TAILQ_ENTRY(nvmf_target_poll_group)	link;
+};
+
 struct nvmf_target {
 	struct spdk_nvmf_tgt	*tgt;
@@ -72,6 +80,7 @@ struct nvmf_target {
 };
 
 TAILQ_HEAD(, nvmf_reactor) g_reactors = TAILQ_HEAD_INITIALIZER(g_reactors);
+TAILQ_HEAD(, nvmf_target_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
 static struct nvmf_reactor *g_master_reactor = NULL;
 static struct nvmf_reactor *g_next_reactor = NULL;
@@ -379,12 +388,106 @@ nvmf_create_nvmf_tgt(void)
 	spdk_nvmf_subsystem_set_allow_any_host(subsystem, true);
 	fprintf(stdout, "created a nvmf target service\n");
+	g_target_state = NVMF_INIT_POLL_GROUPS;
 	return;
 
 error:
 	g_target_state = NVMF_FINI_TARGET;
 }
+
+static void
+nvmf_tgt_create_poll_groups_done(void *ctx)
+{
+	fprintf(stdout, "create target's poll groups done\n");
+}
+
+static void
+nvmf_tgt_create_poll_group(void *ctx)
+{
+	struct nvmf_target_poll_group *pg;
+
+	pg = calloc(1, sizeof(struct nvmf_target_poll_group));
+	if (!pg) {
+		fprintf(stderr, "failed to allocate poll group\n");
+		return;
+	}
+
+	pg->thread = spdk_get_thread();
+	pg->group = spdk_nvmf_poll_group_create(g_nvmf_tgt.tgt);
+	if (!pg->group) {
+		fprintf(stderr, "failed to create poll group of the target\n");
+		free(pg);
+		return;
+	}
+
+	/* spdk_for_each_thread() is asynchronous, but runs on each thread in serial.
+	 * Since this is the only operation occurring on the g_poll_groups list,
+	 * we don't need to take a lock.
+	 */
+	TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);
+}
+
+static void
+nvmf_poll_groups_create(void)
+{
+	/* Send a message to each thread and create a poll group.
+	 * Poll groups handle all the connections from hosts, so we
+	 * would like to create one poll group on each core. We use
+	 * spdk_for_each_thread() because we have allocated one lightweight
+	 * thread per core in the thread layer. You could also do this by
+	 * traversing the reactors or by using SPDK_ENV_FOREACH_CORE().
+	 */
+	spdk_for_each_thread(nvmf_tgt_create_poll_group,
+			     NULL,
+			     nvmf_tgt_create_poll_groups_done);
+}
+
+static void
+nvmf_tgt_destroy_poll_groups_done(struct spdk_io_channel_iter *i, int status)
+{
+	fprintf(stdout, "destroy target's poll groups done\n");
+
+	g_target_state = NVMF_FINI_TARGET;
+	nvmf_target_advance_state();
+}
+
+static void
+nvmf_tgt_destroy_poll_group(struct spdk_io_channel_iter *i)
+{
+	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
+	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(io_ch);
+	struct nvmf_target_poll_group *pg, *tmp;
+
+	/* spdk_for_each_channel() is asynchronous but executes serially.
+	 * That means only a single thread is executing this callback at a time,
+	 * so we can safely touch the g_poll_groups list without a lock.
+	 */
+	TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tmp) {
+		if (pg->group == group) {
+			TAILQ_REMOVE(&g_poll_groups, pg, link);
+			spdk_nvmf_poll_group_destroy(group);
+			free(pg);
+			break;
+		}
+	}
+
+	spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+nvmf_poll_groups_destroy(void)
+{
+	/* Send a message to each thread and destroy its poll group.
+	 * Poll groups are I/O channels associated with the spdk_nvmf_tgt object,
+	 * so we can iterate over all of them with spdk_for_each_channel().
+	 */
+	spdk_for_each_channel(g_nvmf_tgt.tgt,
+			      nvmf_tgt_destroy_poll_group,
+			      NULL,
+			      nvmf_tgt_destroy_poll_groups_done);
+}
 
 static void
 nvmf_subsystem_fini_done(void *cb_arg)
 {
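
(Aside: the comment in nvmf_poll_groups_create() notes that traversing the
reactors or using SPDK_ENV_FOREACH_CORE() would work too. A minimal sketch of
that alternative, assuming a hypothetical nvmf_reactor_get_thread() helper
that this example does not actually provide:

	/* Sketch only: create one poll group per core by iterating cores
	 * directly instead of using spdk_for_each_thread(). The helper
	 * nvmf_reactor_get_thread() is hypothetical; it would return the
	 * spdk_thread bound to the given core.
	 */
	static void
	nvmf_poll_groups_create_per_core(void)
	{
		uint32_t i;

		SPDK_ENV_FOREACH_CORE(i) {
			struct spdk_thread *thread = nvmf_reactor_get_thread(i); /* hypothetical */

			spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
		}
	}

Note that this variant has no aggregate completion callback; spdk_for_each_thread()
provides the "done on all threads" notification for free, which is one reason the
example uses it.)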
@@ -420,6 +523,12 @@ nvmf_target_advance_state(void)
 	case NVMF_INIT_TARGET:
 		nvmf_create_nvmf_tgt();
 		break;
+	case NVMF_INIT_POLL_GROUPS:
+		nvmf_poll_groups_create();
+		break;
+	case NVMF_FINI_POLL_GROUPS:
+		nvmf_poll_groups_destroy();
+		break;
 	case NVMF_FINI_TARGET:
 		nvmf_destroy_nvmf_tgt();
 		break;
@@ -441,15 +550,15 @@ static void
 _nvmf_shutdown_cb(void *ctx)
 {
 	/* Still in initialization state, defer shutdown operation */
-	if (g_target_state < NVMF_INIT_TARGET) {
+	if (g_target_state < NVMF_INIT_POLL_GROUPS) {
 		spdk_thread_send_msg(spdk_get_thread(), _nvmf_shutdown_cb, NULL);
 		return;
-	} else if (g_target_state >= NVMF_FINI_TARGET) {
+	} else if (g_target_state >= NVMF_FINI_POLL_GROUPS) {
 		/* Already in Shutdown status, ignore the signal */
 		return;
 	}
 
-	g_target_state = NVMF_FINI_TARGET;
+	g_target_state = NVMF_FINI_POLL_GROUPS;
 	nvmf_target_advance_state();
 }
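
(One more API note on the teardown path: the status argument of
spdk_for_each_channel_continue() is propagated. If a callback passes a
nonzero status, the iteration stops early and that status reaches the
completion callback. A sketch of how a failing destroy step could report an
error; the rc handling here is illustrative, not part of the commit:

	/* Sketch only: propagate a per-channel failure. A nonzero rc stops
	 * spdk_for_each_channel() from visiting the remaining channels and
	 * is delivered as the completion callback's status argument.
	 */
	static void
	example_destroy_one(struct spdk_io_channel_iter *i)
	{
		int rc = 0;

		/* ... tear down per-channel state; set rc on failure ... */

		spdk_for_each_channel_continue(i, rc);
	}

	static void
	example_destroy_done(struct spdk_io_channel_iter *i, int status)
	{
		if (status != 0) {
			fprintf(stderr, "poll group teardown failed: %d\n", status);
		}
	}
)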