/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/likely.h"
#include "spdk/json.h"
#include "spdk/jsonrpc.h"
#include "spdk/rpc.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk_internal/event.h"

static bool g_is_running = true;
pthread_mutex_t g_sched_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Length of one busy/idle timeslice, in microseconds. */
#define TIMESLICE_US (100 * 1000)

static bool g_for_each_reactor = false;

struct sched_thread {
	struct spdk_thread *thread;
	struct spdk_poller *poller;
	struct spdk_poller *idle_poller;
	int active_percent;
	struct spdk_jsonrpc_request *request;
	TAILQ_ENTRY(sched_thread) link;
};

static TAILQ_HEAD(, sched_thread) g_sched_threads = TAILQ_HEAD_INITIALIZER(g_sched_threads);

struct rpc_thread_create {
	int active_percent;
	char *name;
	char *cpu_mask;
};

static void
free_rpc_thread_create(struct rpc_thread_create *req)
{
	free(req->name);
	free(req->cpu_mask);
}

static const struct spdk_json_object_decoder rpc_thread_create_decoders[] = {
	{"active", offsetof(struct rpc_thread_create, active_percent), spdk_json_decode_int32},
	{"name", offsetof(struct rpc_thread_create, name), spdk_json_decode_string, true},
	{"cpu_mask", offsetof(struct rpc_thread_create, cpu_mask), spdk_json_decode_string, true},
};

static void
rpc_scheduler_thread_create_cb(struct spdk_jsonrpc_request *request, uint64_t thread_id)
{
	struct spdk_json_write_ctx *w;

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_uint64(w, thread_id);
	spdk_jsonrpc_end_result(request, w);
}

static void
thread_delete(struct sched_thread *sched_thread)
{
	spdk_poller_unregister(&sched_thread->poller);
	spdk_poller_unregister(&sched_thread->idle_poller);
	spdk_thread_exit(sched_thread->thread);

	TAILQ_REMOVE(&g_sched_threads, sched_thread, link);
	free(sched_thread);

	if (!g_is_running && TAILQ_EMPTY(&g_sched_threads)) {
		spdk_app_stop(0);
	}
}

static int
poller_run_busy(void *arg)
{
	struct sched_thread *sched_thread = arg;

	if (spdk_unlikely(!g_is_running)) {
		pthread_mutex_lock(&g_sched_list_mutex);
		thread_delete(sched_thread);
		pthread_mutex_unlock(&g_sched_list_mutex);
		return SPDK_POLLER_IDLE;
	}

	/* Simulate load by busy-waiting for active_percent of the timeslice,
	 * e.g. active_percent == 50 spins for 50 ms out of every 100 ms.
	 */
	spdk_delay_us(TIMESLICE_US * sched_thread->active_percent / 100);

	return SPDK_POLLER_BUSY;
}

static int
poller_run_idle(void *arg)
{
	struct sched_thread *sched_thread = arg;

	if (spdk_unlikely(!g_is_running)) {
		pthread_mutex_lock(&g_sched_list_mutex);
		thread_delete(sched_thread);
		pthread_mutex_unlock(&g_sched_list_mutex);
		return SPDK_POLLER_IDLE;
	}

	spdk_delay_us(10);

	return SPDK_POLLER_IDLE;
}

static void
update_pollers(struct sched_thread *sched_thread)
{
	spdk_poller_unregister(&sched_thread->poller);
	if (sched_thread->active_percent > 0) {
		sched_thread->poller = spdk_poller_register_named(poller_run_busy, sched_thread, TIMESLICE_US,
				       spdk_thread_get_name(sched_thread->thread));
		assert(sched_thread->poller != NULL);
	}
	if (sched_thread->idle_poller == NULL) {
		sched_thread->idle_poller = spdk_poller_register_named(poller_run_idle, sched_thread, 0,
					    "idle_poller");
		assert(sched_thread->idle_poller != NULL);
	}
}

static void
rpc_register_poller(void *arg)
{
	struct sched_thread *sched_thread = arg;

	update_pollers(sched_thread);

	if (sched_thread->request != NULL) {
		rpc_scheduler_thread_create_cb(sched_thread->request, spdk_thread_get_id(sched_thread->thread));
		sched_thread->request = NULL;
	}
}

static void
rpc_scheduler_thread_create(struct spdk_jsonrpc_request *request,
			    const struct spdk_json_val *params)
{
	struct sched_thread *sched_thread;
	struct rpc_thread_create req = {0};
	struct spdk_cpuset *cpu_set = NULL;
	int rc = 0;

	if (spdk_json_decode_object(params, rpc_thread_create_decoders,
				    SPDK_COUNTOF(rpc_thread_create_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters provided");
		return;
	}

	if (req.active_percent < 0 || req.active_percent > 100) {
		SPDK_ERRLOG("invalid percent value %d\n", req.active_percent);
		spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL));
		free_rpc_thread_create(&req);
		return;
	}

	if (req.cpu_mask != NULL) {
		cpu_set = calloc(1, sizeof(*cpu_set));
		assert(cpu_set != NULL);
		rc = spdk_cpuset_parse(cpu_set, req.cpu_mask);
		if (rc < 0) {
			SPDK_ERRLOG("invalid cpumask %s\n", req.cpu_mask);
			spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL));
			free_rpc_thread_create(&req);
			free(cpu_set);
			return;
		}
	}

	sched_thread = calloc(1, sizeof(*sched_thread));
	assert(sched_thread != NULL);

	sched_thread->thread = spdk_thread_create(req.name, cpu_set);
	assert(sched_thread->thread != NULL);
	free(cpu_set);

	sched_thread->request = request;
	sched_thread->active_percent = req.active_percent;

	spdk_thread_send_msg(sched_thread->thread, rpc_register_poller, sched_thread);

	free_rpc_thread_create(&req);

	pthread_mutex_lock(&g_sched_list_mutex);
	TAILQ_INSERT_TAIL(&g_sched_threads, sched_thread, link);
	pthread_mutex_unlock(&g_sched_list_mutex);

	return;
}

SPDK_RPC_REGISTER("scheduler_thread_create", rpc_scheduler_thread_create, SPDK_RPC_RUNTIME)
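
/*
 * Illustrative JSON-RPC request for scheduler_thread_create, based on
 * rpc_thread_create_decoders above ("active" is the busy percentage 0-100;
 * "name" and "cpu_mask" are optional). The transport used to deliver the
 * request (socket path, rpc.py plugin) depends on the test setup and is an
 * assumption, not shown here:
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "scheduler_thread_create",
 *    "params": {"name": "thread0", "cpu_mask": "0xf", "active": 50}}
 *
 * The result is the ID of the newly created SPDK thread.
 */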

struct rpc_thread_set_active_ctx {
	int active_percent;
	struct spdk_jsonrpc_request *request;
};

struct rpc_thread_set_active {
	uint64_t thread_id;
	int active_percent;
};

static const struct spdk_json_object_decoder rpc_thread_set_active_decoders[] = {
	{"thread_id", offsetof(struct rpc_thread_set_active, thread_id), spdk_json_decode_uint64},
	{"active", offsetof(struct rpc_thread_set_active, active_percent), spdk_json_decode_int32},
};

static void
rpc_scheduler_thread_set_active_cb(void *arg)
{
	struct rpc_thread_set_active_ctx *ctx = arg;
	uint64_t thread_id;
	struct sched_thread *sched_thread;

	thread_id = spdk_thread_get_id(spdk_get_thread());

	pthread_mutex_lock(&g_sched_list_mutex);
	TAILQ_FOREACH(sched_thread, &g_sched_threads, link) {
		if (spdk_thread_get_id(sched_thread->thread) == thread_id) {
			sched_thread->active_percent = ctx->active_percent;
			update_pollers(sched_thread);
			pthread_mutex_unlock(&g_sched_list_mutex);
			spdk_jsonrpc_send_bool_response(ctx->request, true);
			free(ctx);
			return;
		}
	}
	pthread_mutex_unlock(&g_sched_list_mutex);

	spdk_jsonrpc_send_error_response(ctx->request, -ENOENT, spdk_strerror(ENOENT));
	free(ctx);
	return;
}

static void
rpc_scheduler_thread_set_active(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct spdk_thread *thread;
	struct rpc_thread_set_active req = {0};
	struct rpc_thread_set_active_ctx *ctx;

	if (spdk_json_decode_object(params, rpc_thread_set_active_decoders,
				    SPDK_COUNTOF(rpc_thread_set_active_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters provided");
		return;
	}

	if (req.active_percent < 0 || req.active_percent > 100) {
		SPDK_ERRLOG("invalid percent value %d\n", req.active_percent);
		spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL));
		return;
	}

	thread = spdk_thread_get_by_id(req.thread_id);
	if (thread == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOENT, spdk_strerror(ENOENT));
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}
	ctx->request = request;
	ctx->active_percent = req.active_percent;

	spdk_thread_send_msg(thread, rpc_scheduler_thread_set_active_cb, ctx);
}

SPDK_RPC_REGISTER("scheduler_thread_set_active", rpc_scheduler_thread_set_active, SPDK_RPC_RUNTIME)
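
/*
 * Illustrative request for scheduler_thread_set_active (sketch; thread_id is
 * the value returned by scheduler_thread_create):
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "scheduler_thread_set_active",
 *    "params": {"thread_id": 2, "active": 0}}
 *
 * Setting "active" to 0 leaves only the idle poller registered on that thread.
 */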

struct rpc_thread_delete_ctx {
	struct spdk_jsonrpc_request *request;
};

struct rpc_thread_delete {
	uint64_t thread_id;
};

static const struct spdk_json_object_decoder rpc_thread_delete_decoders[] = {
	{"thread_id", offsetof(struct rpc_thread_delete, thread_id), spdk_json_decode_uint64},
};

static void
rpc_scheduler_thread_delete_cb(void *arg)
{
	struct rpc_thread_delete_ctx *ctx = arg;
	struct sched_thread *sched_thread;
	uint64_t thread_id;

	thread_id = spdk_thread_get_id(spdk_get_thread());

	pthread_mutex_lock(&g_sched_list_mutex);
	TAILQ_FOREACH(sched_thread, &g_sched_threads, link) {
		if (spdk_thread_get_id(sched_thread->thread) == thread_id) {
			thread_delete(sched_thread);
			pthread_mutex_unlock(&g_sched_list_mutex);
			spdk_jsonrpc_send_bool_response(ctx->request, true);
			free(ctx);
			return;
		}
	}
	pthread_mutex_unlock(&g_sched_list_mutex);

	spdk_jsonrpc_send_error_response(ctx->request, -ENOENT, spdk_strerror(ENOENT));
	free(ctx);
	return;
}

static void
rpc_scheduler_thread_delete(struct spdk_jsonrpc_request *request,
			    const struct spdk_json_val *params)
{
	struct spdk_thread *thread;
	struct rpc_thread_delete req = {0};
	struct rpc_thread_delete_ctx *ctx;

	if (spdk_json_decode_object(params, rpc_thread_delete_decoders,
				    SPDK_COUNTOF(rpc_thread_delete_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters provided");
		return;
	}

	thread = spdk_thread_get_by_id(req.thread_id);
	if (thread == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOENT, spdk_strerror(ENOENT));
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM));
		return;
	}
	ctx->request = request;

	spdk_thread_send_msg(thread, rpc_scheduler_thread_delete_cb, ctx);
}

SPDK_RPC_REGISTER("scheduler_thread_delete", rpc_scheduler_thread_delete, SPDK_RPC_RUNTIME)
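
/*
 * Illustrative request for scheduler_thread_delete (sketch):
 *
 *   {"jsonrpc": "2.0", "id": 3, "method": "scheduler_thread_delete",
 *    "params": {"thread_id": 2}}
 *
 * The callback runs on the target thread and calls thread_delete(), which
 * unregisters both pollers and exits the SPDK thread.
 */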

static void
test_shutdown(void)
{
	g_is_running = false;
	SPDK_NOTICELOG("Scheduler test application stopped.\n");
	pthread_mutex_lock(&g_sched_list_mutex);
	if (TAILQ_EMPTY(&g_sched_threads)) {
		spdk_app_stop(0);
	}
	pthread_mutex_unlock(&g_sched_list_mutex);
}

static void
for_each_nop(void *arg1, void *arg2)
{
}

static void
for_each_reactor_start(void *arg1, void *arg2)
{
	spdk_for_each_reactor(for_each_nop, NULL, NULL, for_each_reactor_start);
}

static void
test_start(void *arg1)
{
	SPDK_NOTICELOG("Scheduler test application started.\n");
	/* Start an spdk_for_each_reactor operation that just keeps
	 * running over and over again until the app exits. This
	 * serves as a regression test for SPDK issue #2206, ensuring
	 * that any pending spdk_for_each_reactor operations are
	 * completed before reactors are shut down. It is optional and
	 * enabled with the -f flag, since the constant reactor events
	 * would otherwise skew CPU utilization measurements.
	 */
	if (g_for_each_reactor) {
		for_each_reactor_start(NULL, NULL);
	}
}

static void
scheduler_usage(void)
{
	printf(" -f Enable spdk_for_each_reactor regression test\n");
}

static int
scheduler_parse_arg(int ch, char *arg)
{
	switch (ch) {
	case 'f':
		g_for_each_reactor = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
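
/*
 * Example invocation (illustrative only; the binary name, core mask, and RPC
 * transport depend on the build and test environment). The standard SPDK app
 * options such as -m are handled by spdk_app_parse_args(), while -f is the
 * application-specific flag parsed above:
 *
 *   ./scheduler -m 0xf -f
 *
 * followed by JSON-RPC calls such as scheduler_thread_create shown earlier.
 */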

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts;
	int rc = 0;

	spdk_app_opts_init(&opts, sizeof(opts));
	opts.name = "scheduler";
	opts.shutdown_cb = test_shutdown;

	if ((rc = spdk_app_parse_args(argc, argv, &opts,
				      "f", NULL, scheduler_parse_arg, scheduler_usage)) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc;
	}

	rc = spdk_app_start(&opts, test_start, NULL);

	spdk_app_fini();

	return rc;
}