spdk/test/setup/hugepages.sh

commit 45dc5f12ff (Michal Berger): test/setup: Lower per_node_2G_alloc to per_node_1G_alloc
For some systems, depending on the memory distribution, 2 GB per node may be
too much. Lower it to 1 GB, since the only thing this test verifies is
HUGENODE usage.
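
For illustration, the two HUGENODE forms this test exercises look like the
following (node numbers and page counts here are only examples):

    HUGENODE='0,1' NRHUGE=512 ./scripts/setup.sh
    HUGENODE='nodes_hp[0]=512,nodes_hp[1]=1024' ./scripts/setup.sh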

Also, make sure we always log output from setup.sh.

Change-Id: If7c382b442e9523fe5ad5df03110a8cc5467f1d9
Signed-off-by: Michal Berger <michal.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17367
Reviewed-by: Jaroslaw Chachulski <jaroslawx.chachulski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Date: 2023-03-30 07:01:43 +00:00

#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2020 Intel Corporation
# All rights reserved.
#
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../")
source "$testdir/common.sh"
shopt -s extglob nullglob
declare -a nodes_sys=()
declare -i default_hugepages=0
declare -i no_nodes=0
declare -i nr_hugepages=0
default_hugepages=$(get_meminfo Hugepagesize)
default_huge_nr=/sys/kernel/mm/hugepages/hugepages-${default_hugepages}kB/nr_hugepages
global_huge_nr=/proc/sys/vm/nr_hugepages
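# Note: the global /proc knob applies to the default hugepage size only and,
# unlike the per-size sysfs knob above, does not account for surplus/reserved
# pages (see the checks in verify_nr_hugepages() below).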
# Make sure environment doesn't affect the tests
unset -v HUGE_EVEN_ALLOC
unset -v HUGEMEM
unset -v HUGENODE
unset -v NRHUGE
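
# Read the current per-node count of default-sized hugepages from sysfs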
get_nodes() {
	local node

	for node in /sys/devices/system/node/node+([0-9]); do
		nodes_sys[${node##*node}]=$(< "$node/hugepages/hugepages-${default_hugepages}kB/nr_hugepages")
	done

	no_nodes=${#nodes_sys[@]}
	((no_nodes > 0))
}

clear_hp() {
	local node hp

	for node in "${!nodes_sys[@]}"; do
		for hp in "/sys/devices/system/node/node$node/hugepages/hugepages-"*; do
			echo 0 > "$hp/nr_hugepages"
		done
	done

	export CLEAR_HUGE=yes
}

get_test_nr_hugepages() {
	local size=$1 # kB
	if (($# > 1)); then
		shift
		local node_ids=("$@")
	fi

	((size >= default_hugepages))
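	# Round up to a whole number of pages, e.g. a 2 GB (2097152 kB) request
	# with 2048 kB pages yields 1024 pages and a 2049 MB request yields 1025.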
	nr_hugepages=$(((size + default_hugepages - 1) / default_hugepages))

	get_test_nr_hugepages_per_node "${node_ids[@]}"
}

get_test_nr_hugepages_per_node() {
	local user_nodes=("$@")
	local _nr_hugepages=$nr_hugepages
	local _no_nodes=$no_nodes
	local -g nodes_test=()

	if ((${#user_nodes[@]} > 0)); then
		for _no_nodes in "${user_nodes[@]}"; do
			nodes_test[_no_nodes]=$nr_hugepages
		done
		return 0
	elif ((${#nodes_hp[@]} > 0)); then
		for _no_nodes in "${!nodes_hp[@]}"; do
			nodes_test[_no_nodes]=${nodes_hp[_no_nodes]}
		done
		return 0
	fi
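	# Spread the pages evenly, filling from the highest node down; any
	# remainder lands on the lower-numbered nodes, e.g. 1025 pages over
	# 2 nodes gives node1=512, node0=513.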
	while ((_no_nodes > 0)); do
		nodes_test[_no_nodes - 1]=$((_nr_hugepages / _no_nodes))
		: $((_nr_hugepages -= nodes_test[_no_nodes - 1]))
		: $((--_no_nodes))
	done
}
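
# Verify that the per-size, global and per-node hugepage counters all match
# what the test asked setup.sh to allocate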
verify_nr_hugepages() {
	local node
	local sorted_t
	local sorted_s
	local surp
	local resv
	local anon

	if [[ $(< /sys/kernel/mm/transparent_hugepage/enabled) != *"[never]"* ]]; then
		anon=$(get_meminfo AnonHugePages)
	fi

	surp=$(get_meminfo HugePages_Surp)
	resv=$(get_meminfo HugePages_Rsvd)

	echo "nr_hugepages=$nr_hugepages"
	echo "resv_hugepages=$resv"
	echo "surplus_hugepages=$surp"
	echo "anon_hugepages=${anon:-disabled}"

	(($(< "$default_huge_nr") == nr_hugepages + surp + resv))
	# This knob doesn't account for the surp, resv hugepages
	(($(< "$global_huge_nr") == nr_hugepages))
	(($(get_meminfo HugePages_Total) == nr_hugepages + surp + resv))

	get_nodes

	# Take global resv and per-node surplus hugepages into account
	for node in "${!nodes_test[@]}"; do
		((nodes_test[node] += resv))
		((nodes_test[node] += $(get_meminfo HugePages_Surp "$node")))
	done

	# There's no obvious way of determining which NUMA node is going to end
	# up with an odd number of hugepages in case such number was actually
	# allocated by the kernel. Considering that, let's simply check if our
	# expectation is met by sorting and comparing it with nr of hugepages that
	# was actually allocated on each node.
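	# (The sparse arrays below use the page counts themselves as indices, so
	# comparing "${!sorted_s[*]}" against "${!sorted_t[*]}" compares the sets
	# of per-node counts regardless of which node got which, e.g. expected
	# (513,512) and actual (512,513) both collapse to "512 513".)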
	for node in "${!nodes_test[@]}"; do
		sorted_t[nodes_test[node]]=1 sorted_s[nodes_sys[node]]=1
		echo "node$node=${nodes_sys[node]} expecting ${nodes_test[node]}"
	done

	[[ ${!sorted_s[*]} == "${!sorted_t[*]}" ]]
}

# Test cases
default_setup() {
	# Default HUGEMEM (2G) alloc on node0
	get_test_nr_hugepages $((2048 * 1024)) 0
	setup output
	verify_nr_hugepages
}

per_node_1G_alloc() {
	# 1G alloc per node, total N*1G pages
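	# With IFS set to "," the expansions below turn into e.g. HUGENODE="0,1"
	# on a two-node system, requesting NRHUGE pages on each listed node.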
	local IFS=","

	get_test_nr_hugepages $((1024 * 1024)) "${!nodes_sys[@]}"
	NRHUGE=$nr_hugepages HUGENODE="${!nodes_sys[*]}" setup output
	nr_hugepages=$((nr_hugepages * ${#nodes_sys[@]})) verify_nr_hugepages
}

even_2G_alloc() {
	# 2G alloc spread across N nodes
	get_test_nr_hugepages $((2048 * 1024))
	NRHUGE=$nr_hugepages HUGE_EVEN_ALLOC=yes setup output
	verify_nr_hugepages
}

odd_alloc() {
	# Odd 2049MB alloc across N nodes
	get_test_nr_hugepages $((2049 * 1024))
	HUGEMEM=2049 HUGE_EVEN_ALLOC=yes setup output
	verify_nr_hugepages
}

custom_alloc() {
	# Custom alloc: node0 == 1 GB [node1 == 2 GB]
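	# With 2 MB default pages this builds e.g.
	# HUGENODE="nodes_hp[0]=512,nodes_hp[1]=1024" for setup.sh.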
	local IFS=","
	local node
	local nodes_hp=()
	local nr_hugepages=0 _nr_hugepages=0

	get_test_nr_hugepages $((1024 * 1024))
	nodes_hp[0]=$nr_hugepages
	if ((${#nodes_sys[@]} > 1)); then
		get_test_nr_hugepages $((2048 * 1024))
		nodes_hp[1]=$nr_hugepages
	fi

	for node in "${!nodes_hp[@]}"; do
		HUGENODE+=("nodes_hp[$node]=${nodes_hp[node]}")
		((_nr_hugepages += nodes_hp[node]))
	done

	get_test_nr_hugepages_per_node
	HUGENODE="${HUGENODE[*]}" setup output
	nr_hugepages=$_nr_hugepages verify_nr_hugepages
}

no_shrink_alloc() {
	# Default HUGEMEM (2G) alloc on node0, then an attempt to shrink it
	# by half: 2G should remain
	get_test_nr_hugepages $((2048 * 1024)) 0

	# Verify the default first
	setup output
	verify_nr_hugepages

	# Now attempt to shrink the hp number
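	# (CLEAR_HUGE=no makes setup.sh keep the existing allocation, so the
	# smaller NRHUGE request below should not shrink the pool.)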
	CLEAR_HUGE=no NRHUGE=$((nr_hugepages / 2)) setup output
	# 2G should remain
	verify_nr_hugepages
}

get_nodes
clear_hp
run_test "default_setup" default_setup
run_test "per_node_1G_alloc" per_node_1G_alloc
run_test "even_2G_alloc" even_2G_alloc
run_test "odd_alloc" odd_alloc
run_test "custom_alloc" custom_alloc
run_test "no_shrink_alloc" no_shrink_alloc
clear_hp