From e5ba4daa865c47df9b7164172c7a86712f9ba1cd Mon Sep 17 00:00:00 2001 From: Michal Berger Date: Tue, 7 Dec 2021 11:51:36 +0100 Subject: [PATCH] test/scheduler: Use separate cgroup for the tests Two cgroups are created prior to running the scheduler tests: - /cpuset/spdk - /cpuset/all /cpuset/spdk is the cgroup dedicated for the tests, i.e., the SPDK processes executed along the way. The resources consist of the cpus that are initially picked up by isolate_cores.sh. /cpuset/all is the "dummy" cgroup that holds most of the remaining processes that run on the target system - "most" since not every process (especially kernel threads) can be migrated between cgroups. This cgroup's resources include all the online cpus except those selected for the /cpuset/spdk. This should allow for lowering the noise on the target SPDK's cpus and make sure that load on each cpu is generated exclusively by the SPDK. Fixes issue #1950 Signed-off-by: Michal Berger Change-Id: Ic45149f55052ff03bead0b9bea086f95c87ea75d Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10584 Tested-by: SPDK CI Jenkins Community-CI: Broadcom CI Reviewed-by: Konrad Sztyber Reviewed-by: Tomasz Zawadzki --- test/scheduler/cgroups.sh | 6 ++++++ test/scheduler/common.sh | 2 +- test/scheduler/isolate_cores.sh | 22 ++++++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/test/scheduler/cgroups.sh b/test/scheduler/cgroups.sh index 445946cc0..8a3ebbf5a 100644 --- a/test/scheduler/cgroups.sh +++ b/test/scheduler/cgroups.sh @@ -142,5 +142,11 @@ kill_in_cgroup() { fi } +remove_cpuset_cgroup() { + if ((cgroup_version == 2)); then + remove_cgroup /cpuset + fi +} + declare -r sysfs_cgroup=/sys/fs/cgroup cgroup_version=$(check_cgroup) diff --git a/test/scheduler/common.sh b/test/scheduler/common.sh index 11deca08f..f5ac5847c 100644 --- a/test/scheduler/common.sh +++ b/test/scheduler/common.sh @@ -379,7 +379,7 @@ exec_under_dynamic_scheduler() { if [[ -e /proc/$spdk_pid/status ]]; then 
killprocess "$spdk_pid" fi - "$@" --wait-for-rpc & + exec_in_cgroup "/cpuset/spdk" "$@" --wait-for-rpc & spdk_pid=$! # Give some time for the app to init itself waitforlisten "$spdk_pid" diff --git a/test/scheduler/isolate_cores.sh b/test/scheduler/isolate_cores.sh index 0e806b414..5b272c1e7 100644 --- a/test/scheduler/isolate_cores.sh +++ b/test/scheduler/isolate_cores.sh @@ -3,9 +3,21 @@ xtrace_disable source "$testdir/common.sh" +restore_cgroups() { + xtrace_disable + kill_in_cgroup "/cpuset/spdk" + remove_cgroup "/cpuset/spdk" + remove_cgroup "/cpuset/all" + remove_cpuset_cgroup + xtrace_restore +} + +trap "restore_cgroups" EXIT + # Number of cpus to include in the mask NUM_CPUS=${NUM_CPUS:-8} +init_cpuset_cgroup map_cpus # Build core mask. Avoid all CPUs that may be offline and skip cpu0 @@ -33,6 +45,16 @@ filter_allowed_list all_cpus=("${allowed[@]}") all_cpus_csv=$(fold_array_onto_string "${all_cpus[@]}") all_cpumask=$(mask_cpus "${all_cpus[@]}") +all_cpus_mems=0 + +# Pin spdk cores to a new cgroup +create_cgroup "/cpuset/spdk" +create_cgroup "/cpuset/all" +set_cgroup_attr "/cpuset/spdk" cpuset.cpus "$spdk_cpus_csv" +set_cgroup_attr "/cpuset/spdk" cpuset.mems "$spdk_cpus_mems" +set_cgroup_attr "/cpuset/all" cpuset.cpus "$all_cpus_csv" +set_cgroup_attr "/cpuset/all" cpuset.mems "$all_cpus_mems" +move_cgroup_procs "/cpuset" "/cpuset/all" export \ "spdk_cpumask=$spdk_cpumask" \