bdevperf: Change io_target_groups from array to linked list

This patch changes io_target_group from array to linked list
and removes g_coremap.

In particular, change how to choose io_target_group for each
target, and reverse if condition in performance_dump(). These
changes will make the code clearer.

Besides, remove blockdev_heads_destroy() because it just calls
bdevperf_free_targets() now.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I170a1c58fe2df9a11902224dd8634462a9af6123
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478565
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Shuhei Matsumoto 2020-01-01 16:21:36 -05:00 committed by Tomasz Zawadzki
parent 333fd3ee32
commit 1d0048097d

View File

@ -117,9 +117,11 @@ struct io_target {
/* A group of I/O targets that are all serviced on the same lcore. */
struct io_target_group {
	TAILQ_HEAD(, io_target)	targets;	/* targets assigned to this group */
	uint32_t		lcore;		/* core that drives this group's I/O */
	TAILQ_ENTRY(io_target_group) link;	/* linkage on the global g_head list */
};

/* Global list of target groups, one per core in the application core mask.
 * Replaces the previous calloc'd array indexed by core number. */
TAILQ_HEAD(, io_target_group) g_head = TAILQ_HEAD_INITIALIZER(g_head);

/* Round-robin cursor into g_head used when mapping new targets to groups;
 * NULL means "wrap around to the first group". */
struct io_target_group *g_next_tg;

static uint32_t g_target_count = 0;
/*
@ -225,25 +227,27 @@ verify_data(void *wr_buf, int wr_buf_len, void *rd_buf, int rd_buf_len, int bloc
/* Allocate one io_target_group per core in the application's core mask and
 * append each to the global g_head list.
 *
 * Returns 0 on success.  On allocation failure, frees any groups created so
 * far (leaving g_head empty) and returns -1.
 */
static int
blockdev_heads_init(void)
{
	uint32_t i;
	struct io_target_group *group, *tmp;

	SPDK_ENV_FOREACH_CORE(i) {
		/* sizeof *group keeps the allocation tied to the variable's type */
		group = calloc(1, sizeof(*group));
		if (!group) {
			fprintf(stderr, "Cannot allocate io_target_group\n");
			goto error;
		}
		group->lcore = i;
		TAILQ_INIT(&group->targets);
		TAILQ_INSERT_TAIL(&g_head, group, link);
	}

	return 0;

error:
	/* Unwind: groups created before the failure have empty target lists,
	 * so removing and freeing them is sufficient cleanup. */
	TAILQ_FOREACH_SAFE(group, &g_head, link, tmp) {
		TAILQ_REMOVE(&g_head, group, link);
		free(group);
	}
	return -1;
}
static void
@ -265,29 +269,19 @@ bdevperf_free_target(struct io_target *target)
/* Tear down every target group: free each target in each group, then the
 * group itself, leaving g_head empty.  Safe to call when g_head is already
 * empty (the loops simply do nothing), so no NULL/empty guard is needed. */
static void
bdevperf_free_targets(void)
{
	struct io_target_group *group, *tmp_group;
	struct io_target *target, *tmp_target;

	TAILQ_FOREACH_SAFE(group, &g_head, link, tmp_group) {
		TAILQ_REMOVE(&g_head, group, link);
		TAILQ_FOREACH_SAFE(target, &group->targets, link, tmp_target) {
			TAILQ_REMOVE(&group->targets, target, link);
			bdevperf_free_target(target);
		}
		free(group);
	}
}
}
/* NOTE(review): this function is removed by this patch (per the commit
 * message, it reduced to a plain call of bdevperf_free_targets()).  As
 * shown it is also no longer valid: once g_head becomes a TAILQ head
 * rather than a heap-allocated array, free(g_head) must not be called. */
static void
blockdev_heads_destroy(void)
{
bdevperf_free_targets();
free(g_head);
}
static void
bdevperf_target_gone(void *arg)
{
@ -304,10 +298,11 @@ bdevperf_target_gone(void *arg)
static int
bdevperf_construct_target(struct spdk_bdev *bdev)
{
struct io_target_group *group;
struct io_target *target;
size_t align;
int block_size, data_block_size;
int rc, index;
int rc;
if (g_unmap && !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
printf("Skipping %s because it does not support unmap\n", spdk_bdev_get_name(bdev));
@ -377,11 +372,16 @@ bdevperf_construct_target(struct spdk_bdev *bdev)
target->reset_timer = NULL;
TAILQ_INIT(&target->task_list);
/* Mapping each created target to lcore */
index = g_target_count % spdk_env_get_core_count();
target->lcore = g_head[index].lcore;
target->group = &g_head[index];
TAILQ_INSERT_HEAD(&g_head[index].targets, target, link);
/* Mapping each created target to target group */
if (g_next_tg == NULL) {
g_next_tg = TAILQ_FIRST(&g_head);
assert(g_next_tg != NULL);
}
group = g_next_tg;
g_next_tg = TAILQ_NEXT(g_next_tg, link);
target->lcore = group->lcore;
target->group = group;
TAILQ_INSERT_TAIL(&group->targets, target, link);
g_target_count++;
return 0;
@ -988,20 +988,20 @@ get_ema_io_per_second(struct io_target *target, uint64_t ema_period)
static void
performance_dump(uint64_t io_time_in_usec, uint64_t ema_period)
{
uint32_t index;
unsigned lcore_id;
double io_per_second, mb_per_second;
double total_io_per_second, total_mb_per_second;
struct io_target_group *group;
struct io_target *target;
total_io_per_second = 0;
total_mb_per_second = 0;
for (index = 0; index < spdk_env_get_core_count(); index++) {
if (!TAILQ_EMPTY(&g_head[index].targets)) {
lcore_id = g_head[index].lcore;
TAILQ_FOREACH(group, &g_head, link) {
if (!TAILQ_EMPTY(&group->targets)) {
lcore_id = group->lcore;
printf("\r Logical core: %u\n", lcore_id);
}
TAILQ_FOREACH(target, &g_head[index].targets, link) {
TAILQ_FOREACH(target, &group->targets, link) {
if (ema_period == 0) {
io_per_second = get_cma_io_per_second(target, io_time_in_usec);
} else {
@ -1069,10 +1069,10 @@ static struct bdevperf_task *bdevperf_construct_task_on_target(struct io_target
static int
bdevperf_construct_targets_tasks(void)
{
uint32_t i;
struct io_target_group *group;
struct io_target *target;
struct bdevperf_task *task;
int j, task_num = g_queue_depth;
int i, task_num = g_queue_depth;
/*
* Create the task pool after we have enumerated the targets, so that we know
@ -1084,9 +1084,9 @@ bdevperf_construct_targets_tasks(void)
}
/* Initialize task list for each target */
for (i = 0; i < spdk_env_get_core_count(); i++) {
TAILQ_FOREACH(target, &g_head[i].targets, link) {
for (j = 0; j < task_num; j++) {
TAILQ_FOREACH(group, &g_head, link) {
TAILQ_FOREACH(target, &group->targets, link) {
for (i = 0; i < task_num; i++) {
task = bdevperf_construct_task_on_target(target);
if (task == NULL) {
goto ret;
@ -1249,10 +1249,9 @@ verify_test_params(struct spdk_app_opts *opts)
static int
bdevperf_test(void)
{
uint32_t i;
struct io_target_group *group;
struct spdk_event *event;
int rc;
uint32_t core_count = spdk_min(g_target_count, spdk_env_get_core_count());
rc = bdevperf_construct_targets_tasks();
if (rc) {
@ -1270,10 +1269,10 @@ bdevperf_test(void)
}
/* Send events to start all I/O */
for (i = 0; i < core_count; i++) {
if (!TAILQ_EMPTY(&g_head[i].targets)) {
event = spdk_event_allocate(g_head[i].lcore, bdevperf_submit_on_core,
&g_head[i], NULL);
TAILQ_FOREACH(group, &g_head, link) {
if (!TAILQ_EMPTY(&group->targets)) {
event = spdk_event_allocate(group->lcore, bdevperf_submit_on_core,
group, NULL);
spdk_event_call(event);
}
}
@ -1328,7 +1327,7 @@ bdevperf_stop_io_on_core(void *arg1, void *arg2)
static void
spdk_bdevperf_shutdown_cb(void)
{
uint32_t i;
struct io_target_group *group;
struct spdk_event *event;
g_shutdown = true;
@ -1338,18 +1337,18 @@ spdk_bdevperf_shutdown_cb(void)
return;
}
if (g_head == NULL) {
if (TAILQ_EMPTY(&g_head)) {
spdk_app_stop(0);
return;
}
g_shutdown_tsc = spdk_get_ticks() - g_shutdown_tsc;
/* Send events to stop all I/O on each core */
for (i = 0; i < spdk_env_get_core_count(); i++) {
if (!TAILQ_EMPTY(&g_head[i].targets)) {
event = spdk_event_allocate(g_head[i].lcore, bdevperf_stop_io_on_core,
&g_head[i], NULL);
/* Send events to stop all I/O on each target group */
TAILQ_FOREACH(group, &g_head, link) {
if (!TAILQ_EMPTY(&group->targets)) {
event = spdk_event_allocate(group->lcore, bdevperf_stop_io_on_core,
group, NULL);
spdk_event_call(event);
}
}
@ -1500,7 +1499,7 @@ main(int argc, char **argv)
g_run_failed = true;
}
blockdev_heads_destroy();
bdevperf_free_targets();
spdk_app_fini();
return g_run_failed;
}