bdevperf: Use spdk_for_each_channel() for performance_dump()

Output the performance dump per SPDK thread by using
spdk_for_each_channel(). This change is safe even in the shutdown
case because spdk_for_each_channel() calls are serialized when they
are issued from the same thread.
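
For reference, below is a minimal sketch of the spdk_for_each_channel()
callback pattern this change relies on: a heap-allocated context is visited
once per I/O channel, each visit runs on the thread that owns that channel,
and a completion callback runs after the last channel. The io_device handle,
channel struct, and counter names here are hypothetical examples, not part of
bdevperf.

    /*
     * Sketch only: g_example_io_device, struct example_channel and
     * ios_completed are made-up names for illustration.
     */
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include "spdk/thread.h"

    struct example_channel {
    	uint64_t ios_completed;		/* hypothetical per-channel counter */
    };

    struct walk_ctx {
    	uint64_t total_ios;		/* accumulated across all channels */
    };

    /* Hypothetical io_device previously registered with spdk_io_device_register(). */
    static int g_example_io_device;

    static void
    walk_channel(struct spdk_io_channel_iter *i)
    {
    	struct walk_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
    	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
    	struct example_channel *ex_ch = spdk_io_channel_get_ctx(ch);

    	/* Runs on the thread that owns this channel. */
    	ctx->total_ios += ex_ch->ios_completed;

    	/* Must be called exactly once to advance to the next channel. */
    	spdk_for_each_channel_continue(i, 0);
    }

    static void
    walk_done(struct spdk_io_channel_iter *i, int status)
    {
    	struct walk_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

    	printf("Total I/Os: %" PRIu64 "\n", ctx->total_ios);
    	free(ctx);
    }

    static void
    dump_totals(void)
    {
    	struct walk_ctx *ctx = calloc(1, sizeof(*ctx));

    	if (ctx == NULL) {
    		return;
    	}
    	spdk_for_each_channel(&g_example_io_device, walk_channel, ctx, walk_done);
    }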

Keep the lcore information because it is still valuable to know which
lcore each thread ran on.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I996a4ca2c787d04672743b09a9415145cd8d0171
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/645
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2020-01-08 21:17:55 -05:00 committed by Tomasz Zawadzki
parent a6b53af161
commit e58b96f928


@@ -841,43 +841,85 @@ get_ema_io_per_second(struct io_target *target, uint64_t ema_period)
 	return target->ema_io_per_second;
 }
 
-static void
-performance_dump(uint64_t io_time_in_usec, uint64_t ema_period)
-{
-	unsigned lcore_id;
-	double io_per_second, mb_per_second;
-	double total_io_per_second, total_mb_per_second;
-	struct io_target_group *group;
-	struct io_target *target;
-
-	total_io_per_second = 0;
-	total_mb_per_second = 0;
-
-	TAILQ_FOREACH(group, &g_bdevperf.groups, link) {
-		if (!TAILQ_EMPTY(&group->targets)) {
-			lcore_id = group->lcore;
-			printf("\r Logical core: %u\n", lcore_id);
-		}
-
-		TAILQ_FOREACH(target, &group->targets, link) {
-			if (ema_period == 0) {
-				io_per_second = get_cma_io_per_second(target, io_time_in_usec);
-			} else {
-				io_per_second = get_ema_io_per_second(target, ema_period);
-			}
-			mb_per_second = io_per_second * g_io_size / (1024 * 1024);
-			printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
-			       target->name, io_per_second, mb_per_second);
-			total_io_per_second += io_per_second;
-			total_mb_per_second += mb_per_second;
-		}
-	}
+struct perf_dump_ctx {
+	uint64_t io_time_in_usec;
+	uint64_t ema_period;
+	double total_io_per_second;
+	double total_mb_per_second;
+};
+
+static void
+_performance_dump_done(struct spdk_io_channel_iter *i, int status)
+{
+	struct perf_dump_ctx *ctx;
+
+	ctx = spdk_io_channel_iter_get_ctx(i);
 
 	printf("\r =====================================================\n");
 	printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
-	       "Total", total_io_per_second, total_mb_per_second);
+	       "Total", ctx->total_io_per_second, ctx->total_mb_per_second);
 	fflush(stdout);
+
+	free(ctx);
+}
+
+static void
+_performance_dump(struct spdk_io_channel_iter *i)
+{
+	struct perf_dump_ctx *ctx;
+	struct spdk_io_channel *ch;
+	struct io_target_group *group;
+	struct io_target *target;
+	double io_per_second, mb_per_second;
+
+	ctx = spdk_io_channel_iter_get_ctx(i);
+	ch = spdk_io_channel_iter_get_channel(i);
+	group = spdk_io_channel_get_ctx(ch);
+
+	if (TAILQ_EMPTY(&group->targets)) {
+		goto exit;
+	}
+
+	printf("\r Thread name: %s\n", spdk_thread_get_name(spdk_get_thread()));
+	printf("\r Logical core: %u\n", group->lcore);
+
+	TAILQ_FOREACH(target, &group->targets, link) {
+		if (ctx->ema_period == 0) {
+			io_per_second = get_cma_io_per_second(target, ctx->io_time_in_usec);
+		} else {
+			io_per_second = get_ema_io_per_second(target, ctx->ema_period);
+		}
+		mb_per_second = io_per_second * g_io_size / (1024 * 1024);
+		printf("\r %-20s: %10.2f IOPS %10.2f MiB/s\n",
+		       target->name, io_per_second, mb_per_second);
+		ctx->total_io_per_second += io_per_second;
+		ctx->total_mb_per_second += mb_per_second;
+	}
+
+	fflush(stdout);
+
+exit:
+	spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+performance_dump(uint64_t io_time_in_usec, uint64_t ema_period)
+{
+	struct perf_dump_ctx *ctx;
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (ctx == NULL) {
+		return;
+	}
+
+	ctx->io_time_in_usec = io_time_in_usec;
+	ctx->ema_period = ema_period;
+
+	spdk_for_each_channel(&g_bdevperf, _performance_dump, ctx,
+			      _performance_dump_done);
 }
 
 static int
 performance_statistics_thread(void *arg)
 {