From 453b15d62d675a6e29ff42229951f12664bd7578 Mon Sep 17 00:00:00 2001 From: Tomasz Zawadzki Date: Mon, 4 Jan 2021 10:00:58 -0500 Subject: [PATCH] test/scheduler: application to test behaviour of schedulers This is an application designed to provide custom RPC interface for testing scheduler and governor implementations. Based on those RPCs scenarios later can be implemented to verify functionality. Tests added here verify just the app itself. Signed-off-by: Tomasz Zawadzki Change-Id: I74b81234b95d815864cf8264705d728e3a7fe309 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5763 Community-CI: Broadcom CI Reviewed-by: Maciej Szwed Reviewed-by: Jim Harris Reviewed-by: Michal Berger Tested-by: SPDK CI Jenkins --- test/event/Makefile | 2 +- test/event/event.sh | 7 +- test/event/scheduler/.gitignore | 1 + test/event/scheduler/Makefile | 43 +++ test/event/scheduler/scheduler.c | 392 +++++++++++++++++++++++ test/event/scheduler/scheduler.sh | 40 +++ test/event/scheduler/scheduler_plugin.py | 41 +++ 7 files changed, 523 insertions(+), 3 deletions(-) create mode 100644 test/event/scheduler/.gitignore create mode 100644 test/event/scheduler/Makefile create mode 100644 test/event/scheduler/scheduler.c create mode 100755 test/event/scheduler/scheduler.sh create mode 100644 test/event/scheduler/scheduler_plugin.py diff --git a/test/event/Makefile b/test/event/Makefile index 4b9cab867..198fcd075 100644 --- a/test/event/Makefile +++ b/test/event/Makefile @@ -37,7 +37,7 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk DIRS-y = event_perf reactor reactor_perf ifeq ($(OS),Linux) -DIRS-y += app_repeat +DIRS-y += app_repeat scheduler endif .PHONY: all clean $(DIRS-y) diff --git a/test/event/event.sh b/test/event/event.sh index 3a681311c..1d90fc5dd 100755 --- a/test/event/event.sh +++ b/test/event/event.sh @@ -42,6 +42,9 @@ run_test "event_perf" $testdir/event_perf/event_perf -m 0xF -t 1 run_test "event_reactor" $testdir/reactor/reactor -t 1 run_test "event_reactor_perf" 
$testdir/reactor_perf/reactor_perf -t 1 -if [ $(uname -s) = Linux ] && modprobe -n nbd; then - run_test "app_repeat" app_repeat_test +if [ $(uname -s) = Linux ]; then + run_test "event_scheduler" $testdir/scheduler/scheduler.sh + if modprobe -n nbd; then + run_test "app_repeat" app_repeat_test + fi fi diff --git a/test/event/scheduler/.gitignore b/test/event/scheduler/.gitignore new file mode 100644 index 000000000..17e0b594c --- /dev/null +++ b/test/event/scheduler/.gitignore @@ -0,0 +1 @@ +scheduler diff --git a/test/event/scheduler/Makefile b/test/event/scheduler/Makefile new file mode 100644 index 000000000..13aa0ab94 --- /dev/null +++ b/test/event/scheduler/Makefile @@ -0,0 +1,43 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk +include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk + +APP = scheduler +C_SRCS := scheduler.c + +SPDK_LIB_LIST = $(ALL_MODULES_LIST) event_bdev conf + +include $(SPDK_ROOT_DIR)/mk/spdk.app.mk diff --git a/test/event/scheduler/scheduler.c b/test/event/scheduler/scheduler.c new file mode 100644 index 000000000..a9aa7ba58 --- /dev/null +++ b/test/event/scheduler/scheduler.c @@ -0,0 +1,392 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" + +#include "spdk/env.h" +#include "spdk/event.h" +#include "spdk/likely.h" +#include "spdk/json.h" +#include "spdk/jsonrpc.h" +#include "spdk/rpc.h" +#include "spdk/string.h" +#include "spdk/thread.h" + +static bool g_is_running = true; +pthread_mutex_t g_sched_list_mutex = PTHREAD_MUTEX_INITIALIZER; + +struct sched_thread { + struct spdk_thread *thread; + struct spdk_poller *poller; + int active_percent; + struct spdk_jsonrpc_request *request; + TAILQ_ENTRY(sched_thread) link; +}; + +static TAILQ_HEAD(, sched_thread) g_sched_threads = TAILQ_HEAD_INITIALIZER(g_sched_threads); + +struct rpc_thread_create { + int active_percent; + char *name; + char *cpu_mask; +}; + +static void +free_rpc_thread_create(struct rpc_thread_create *req) +{ + free(req->name); + free(req->cpu_mask); +} + +static const struct spdk_json_object_decoder rpc_thread_create_decoders[] = { + {"active", offsetof(struct rpc_thread_create, active_percent), spdk_json_decode_int32}, + {"name", offsetof(struct rpc_thread_create, name), spdk_json_decode_string, true}, + {"cpu_mask", offsetof(struct rpc_thread_create, cpu_mask), spdk_json_decode_string, true}, +}; + +static void
+rpc_scheduler_thread_create_cb(struct spdk_jsonrpc_request *request, uint64_t thread_id) +{ + struct spdk_json_write_ctx *w; + + w = spdk_jsonrpc_begin_result(request); + spdk_json_write_uint64(w, thread_id); + spdk_jsonrpc_end_result(request, w); +} + +static void +thread_delete(struct sched_thread *sched_thread) +{ + spdk_poller_unregister(&sched_thread->poller); + spdk_thread_exit(sched_thread->thread); + + TAILQ_REMOVE(&g_sched_threads, sched_thread, link); + free(sched_thread); + + if (!g_is_running && TAILQ_EMPTY(&g_sched_threads)) { + spdk_app_stop(0); + } +} + +static __thread unsigned int seed = 0; + +static int +poller_run(void *arg) +{ + struct sched_thread *sched_thread = arg; + + if (spdk_unlikely(!g_is_running)) { + pthread_mutex_lock(&g_sched_list_mutex); + thread_delete(sched_thread); + pthread_mutex_unlock(&g_sched_list_mutex); + return SPDK_POLLER_IDLE; + } + + spdk_delay_us(1); + + if ((sched_thread->active_percent == 100) || + (sched_thread->active_percent != 0 && (rand_r(&seed) % 100) < sched_thread->active_percent)) { + return SPDK_POLLER_BUSY; + } + + return SPDK_POLLER_IDLE; +} + +static void +rpc_register_poller(void *arg) +{ + struct sched_thread *sched_thread = arg; + + sched_thread->poller = spdk_poller_register_named(poller_run, sched_thread, 0, + spdk_thread_get_name(sched_thread->thread)); + assert(sched_thread->poller != NULL); + + if (sched_thread->request != NULL) { + rpc_scheduler_thread_create_cb(sched_thread->request, spdk_thread_get_id(sched_thread->thread)); + sched_thread->request = NULL; + } +} + +static void +rpc_scheduler_thread_create(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct sched_thread *sched_thread; + struct rpc_thread_create req = {0}; + struct spdk_cpuset *cpu_set = NULL; + int rc = 0; + + if (spdk_json_decode_object(params, rpc_thread_create_decoders, + SPDK_COUNTOF(rpc_thread_create_decoders), + &req)) { + spdk_jsonrpc_send_error_response(request, 
SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "Invalid parameters provided"); + return; + } + + if (req.active_percent < 0 || req.active_percent > 100) { + SPDK_ERRLOG("invalid percent value %d\n", req.active_percent); + spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL)); + free_rpc_thread_create(&req); + return; + } + + if (req.cpu_mask != NULL) { + cpu_set = calloc(1, sizeof(*cpu_set)); + assert(cpu_set != NULL); + rc = spdk_cpuset_parse(cpu_set, req.cpu_mask); + if (rc < 0) { + SPDK_ERRLOG("invalid cpumask %s\n", req.cpu_mask); + spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL)); + free_rpc_thread_create(&req); + free(cpu_set); + return; + } + } + + sched_thread = calloc(1, sizeof(*sched_thread)); + assert(sched_thread != NULL); + + sched_thread->thread = spdk_thread_create(req.name, cpu_set); + assert(sched_thread->thread != NULL); + free(cpu_set); + + sched_thread->request = request; + sched_thread->active_percent = req.active_percent; + + spdk_thread_send_msg(sched_thread->thread, rpc_register_poller, sched_thread); + + free_rpc_thread_create(&req); + + pthread_mutex_lock(&g_sched_list_mutex); + TAILQ_INSERT_TAIL(&g_sched_threads, sched_thread, link); + pthread_mutex_unlock(&g_sched_list_mutex); + + return; +} + +SPDK_RPC_REGISTER("scheduler_thread_create", rpc_scheduler_thread_create, SPDK_RPC_RUNTIME) + +struct rpc_thread_set_active_ctx { + int active_percent; + struct spdk_jsonrpc_request *request; +}; + +struct rpc_thread_set_active { + uint64_t thread_id; + int active_percent; +}; + +static const struct spdk_json_object_decoder rpc_thread_set_active_decoders[] = { + {"thread_id", offsetof(struct rpc_thread_set_active, thread_id), spdk_json_decode_uint64}, + {"active", offsetof(struct rpc_thread_set_active, active_percent), spdk_json_decode_int32}, +}; + +static void +rpc_scheduler_thread_set_active_cb(void *arg) +{ + struct rpc_thread_set_active_ctx *ctx = arg; + uint64_t thread_id; + struct sched_thread
*sched_thread; + + thread_id = spdk_thread_get_id(spdk_get_thread()); + + pthread_mutex_lock(&g_sched_list_mutex); + TAILQ_FOREACH(sched_thread, &g_sched_threads, link) { + if (spdk_thread_get_id(sched_thread->thread) == thread_id) { + sched_thread->active_percent = ctx->active_percent; + pthread_mutex_unlock(&g_sched_list_mutex); + spdk_jsonrpc_send_bool_response(ctx->request, true); + free(ctx); + return; + } + } + pthread_mutex_unlock(&g_sched_list_mutex); + + spdk_jsonrpc_send_error_response(ctx->request, -ENOENT, spdk_strerror(ENOENT)); + free(ctx); + return; +} + +static void +rpc_scheduler_thread_set_active(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct spdk_thread *thread; + struct rpc_thread_set_active req = {0}; + struct rpc_thread_set_active_ctx *ctx; + + if (spdk_json_decode_object(params, rpc_thread_set_active_decoders, + SPDK_COUNTOF(rpc_thread_set_active_decoders), + &req)) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "Invalid parameters provided"); + return; + } + + if (req.active_percent < 0 || req.active_percent > 100) { + SPDK_ERRLOG("invalid percent value %d\n", req.active_percent); + spdk_jsonrpc_send_error_response(request, -EINVAL, spdk_strerror(EINVAL)); + return; + } + + thread = spdk_thread_get_by_id(req.thread_id); + if (thread == NULL) { + spdk_jsonrpc_send_error_response(request, -ENOENT, spdk_strerror(ENOENT)); + return; + } + + ctx = calloc(1, sizeof(*ctx)); + if (ctx == NULL) { + spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); + return; + } + ctx->request = request; + ctx->active_percent = req.active_percent; + + spdk_thread_send_msg(thread, rpc_scheduler_thread_set_active_cb, ctx); +} + +SPDK_RPC_REGISTER("scheduler_thread_set_active", rpc_scheduler_thread_set_active, SPDK_RPC_RUNTIME) + +struct rpc_thread_delete_ctx { + struct spdk_jsonrpc_request *request; +}; + +struct rpc_thread_delete { + uint64_t thread_id; +}; + +static
const struct spdk_json_object_decoder rpc_thread_delete_decoders[] = { + {"thread_id", offsetof(struct rpc_thread_delete, thread_id), spdk_json_decode_uint64}, +}; + +static void +rpc_scheduler_thread_delete_cb(void *arg) +{ + struct rpc_thread_delete_ctx *ctx = arg; + struct sched_thread *sched_thread; + uint64_t thread_id; + + thread_id = spdk_thread_get_id(spdk_get_thread()); + + pthread_mutex_lock(&g_sched_list_mutex); + TAILQ_FOREACH(sched_thread, &g_sched_threads, link) { + if (spdk_thread_get_id(sched_thread->thread) == thread_id) { + thread_delete(sched_thread); + pthread_mutex_unlock(&g_sched_list_mutex); + spdk_jsonrpc_send_bool_response(ctx->request, true); + free(ctx); + return; + } + } + pthread_mutex_unlock(&g_sched_list_mutex); + + spdk_jsonrpc_send_error_response(ctx->request, -ENOENT, spdk_strerror(ENOENT)); + free(ctx); + return; +} + +static void +rpc_scheduler_thread_delete(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct spdk_thread *thread; + struct rpc_thread_delete req = {0}; + struct rpc_thread_delete_ctx *ctx; + + if (spdk_json_decode_object(params, rpc_thread_delete_decoders, + SPDK_COUNTOF(rpc_thread_delete_decoders), + &req)) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + "Invalid parameters provided"); + return; + } + + thread = spdk_thread_get_by_id(req.thread_id); + if (thread == NULL) { + spdk_jsonrpc_send_error_response(request, -ENOENT, spdk_strerror(ENOENT)); + return; + } + + ctx = calloc(1, sizeof(*ctx)); + if (ctx == NULL) { + spdk_jsonrpc_send_error_response(request, -ENOMEM, spdk_strerror(ENOMEM)); + return; + } + ctx->request = request; + + spdk_thread_send_msg(thread, rpc_scheduler_thread_delete_cb, ctx); +} + +SPDK_RPC_REGISTER("scheduler_thread_delete", rpc_scheduler_thread_delete, SPDK_RPC_RUNTIME) + +static void +test_shutdown(void) +{ + g_is_running = false; + SPDK_NOTICELOG("Scheduler test application stopped.\n"); + 
pthread_mutex_lock(&g_sched_list_mutex); + if (TAILQ_EMPTY(&g_sched_threads)) { + spdk_app_stop(0); + } + pthread_mutex_unlock(&g_sched_list_mutex); +} + +static void +test_start(void *arg1) +{ + SPDK_NOTICELOG("Scheduler test application started.\n"); +} + +int +main(int argc, char **argv) +{ + struct spdk_app_opts opts; + int rc = 0; + + spdk_app_opts_init(&opts, sizeof(opts)); + opts.name = "scheduler"; + opts.shutdown_cb = test_shutdown; + + if ((rc = spdk_app_parse_args(argc, argv, &opts, + NULL, NULL, NULL, NULL)) != SPDK_APP_PARSE_ARGS_SUCCESS) { + return rc; + } + + rc = spdk_app_start(&opts, test_start, NULL); + + spdk_app_fini(); + + return rc; +} diff --git a/test/event/scheduler/scheduler.sh b/test/event/scheduler/scheduler.sh new file mode 100755 index 000000000..00a36125b --- /dev/null +++ b/test/event/scheduler/scheduler.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) +source $rootdir/test/common/autotest_common.sh + +function scheduler_create_thread() { + $rpc --plugin scheduler_plugin scheduler_thread_create -n active_pinned -m 0x1 -a 100 + $rpc --plugin scheduler_plugin scheduler_thread_create -n active_pinned -m 0x2 -a 100 + $rpc --plugin scheduler_plugin scheduler_thread_create -n active_pinned -m 0x4 -a 100 + $rpc --plugin scheduler_plugin scheduler_thread_create -n active_pinned -m 0x8 -a 100 + $rpc --plugin scheduler_plugin scheduler_thread_create -n idle_pinned -m 0x1 -a 0 + $rpc --plugin scheduler_plugin scheduler_thread_create -n idle_pinned -m 0x2 -a 0 + $rpc --plugin scheduler_plugin scheduler_thread_create -n idle_pinned -m 0x4 -a 0 + $rpc --plugin scheduler_plugin scheduler_thread_create -n idle_pinned -m 0x8 -a 0 + + $rpc --plugin scheduler_plugin scheduler_thread_create -n one_third_active -a 30 + thread_id=$($rpc --plugin scheduler_plugin scheduler_thread_create -n half_active -a 0) + $rpc --plugin scheduler_plugin scheduler_thread_set_active $thread_id 50 + 
+ thread_id=$($rpc --plugin scheduler_plugin scheduler_thread_create -n deleted -a 100) + $rpc --plugin scheduler_plugin scheduler_thread_delete $thread_id +} + +export PYTHONPATH=$testdir +rpc="$rootdir/scripts/rpc.py" + +$testdir/scheduler -m 0xF -p 0x2 --wait-for-rpc & +scheduler_pid=$! +trap 'killprocess $scheduler_pid; exit 1' SIGINT SIGTERM EXIT +waitforlisten $scheduler_pid + +$rpc framework_set_scheduler dynamic +$rpc framework_start_init + +# basic integrity test +run_test "scheduler_create_thread" scheduler_create_thread + +trap - SIGINT SIGTERM EXIT +killprocess $scheduler_pid diff --git a/test/event/scheduler/scheduler_plugin.py b/test/event/scheduler/scheduler_plugin.py new file mode 100644 index 000000000..46e79070c --- /dev/null +++ b/test/event/scheduler/scheduler_plugin.py @@ -0,0 +1,41 @@ +from rpc.client import print_json + + +def thread_create(args): + params = {'active': args.active} + if args.name: + params['name'] = args.name + if args.cpu_mask: + params['cpu_mask'] = args.cpu_mask + return args.client.call('scheduler_thread_create', params) + + +def create_thread(args): + print_json(thread_create(args)) + + +def thread_set_active(args): + params = {'thread_id': args.thread_id, 'active': args.active} + return args.client.call('scheduler_thread_set_active', params) + + +def thread_delete(args): + params = {'thread_id': args.thread_id} + return args.client.call('scheduler_thread_delete', params) + + +def spdk_rpc_plugin_initialize(subparsers): + p = subparsers.add_parser('scheduler_thread_create', help='Create spdk thread') + p.add_argument('-n', '--name', help='Name of spdk thread and poller') + p.add_argument('-m', '--cpu_mask', help='CPU mask for spdk thread') + p.add_argument('-a', '--active', help='Percent of time thread is active', type=int) + p.set_defaults(func=create_thread) + + p = subparsers.add_parser('scheduler_thread_set_active', help='Change percent of time the spdk thread is active') + p.add_argument('thread_id', 
help='spdk_thread id', type=int) + p.add_argument('active', help='Percent of time thread is active', type=int) + p.set_defaults(func=thread_set_active) + + p = subparsers.add_parser('scheduler_thread_delete', help='Delete spdk thread') + p.add_argument('thread_id', help='spdk_thread id', type=int) + p.set_defaults(func=thread_delete)