spdk/test/setup/hugepages.sh

#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2020 Intel Corporation
# All rights reserved.
#
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../")
source "$testdir/common.sh"
shopt -s extglob nullglob
declare -a nodes_sys=()
declare -i default_hugepages=0
declare -i no_nodes=0
declare -i nr_hugepages=0
default_hugepages=$(get_meminfo Hugepagesize)
default_huge_nr=/sys/kernel/mm/hugepages/hugepages-${default_hugepages}kB/nr_hugepages
global_huge_nr=/proc/sys/vm/nr_hugepages
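# Note on the two counters above, as relied upon by the checks in
# verify_nr_hugepages() below: the per-size sysfs file is expected to match
# nr_hugepages + surplus + reserved pages, while the legacy
# /proc/sys/vm/nr_hugepages knob covers only the base allocation for the
# default hugepage size.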
# Make sure environment doesn't affect the tests
unset -v HUGE_EVEN_ALLOC
unset -v HUGEMEM
unset -v HUGENODE
unset -v NRHUGE
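# These knobs are consumed by scripts/setup.sh: NRHUGE (number of hugepages),
# HUGEMEM (hugepage memory in MB), HUGENODE (target NUMA node(s)),
# HUGE_EVEN_ALLOC (spread the allocation evenly across nodes) and CLEAR_HUGE.
# Each test case below sets only the ones it exercises.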
get_nodes() {
	local node
	for node in /sys/devices/system/node/node+([0-9]); do
		nodes_sys[${node##*node}]=$(< "$node/hugepages/hugepages-${default_hugepages}kB/nr_hugepages")
	done
	no_nodes=${#nodes_sys[@]}
	((no_nodes > 0)) # fail hard if no NUMA nodes were detected
}
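# Illustrative example (hypothetical 2-node system with 2048 kB default
# hugepages and 1024 of them currently allocated on each node): after
# get_nodes, nodes_sys=([0]=1024 [1]=1024) and no_nodes=2.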
clear_hp() {
	local node hp
	for node in "${!nodes_sys[@]}"; do
		for hp in "/sys/devices/system/node/node$node/hugepages/hugepages-"*; do
			echo 0 > "$hp/nr_hugepages"
		done
	done
	export CLEAR_HUGE=yes
}
get_test_nr_hugepages() {
	local size=$1 # kB
	if (($# > 1)); then
		shift
		local node_ids=("$@")
	fi
	((size >= default_hugepages)) # the request must cover at least one hugepage
	# Round the requested size up to a whole number of default-sized hugepages
	nr_hugepages=$(((size + default_hugepages - 1) / default_hugepages))
	get_test_nr_hugepages_per_node "${node_ids[@]}"
}
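# Worked example of the rounding above (assuming 2048 kB default hugepages):
# get_test_nr_hugepages $((2049 * 1024)) requests 2098176 kB, so
# nr_hugepages = (2098176 + 2047) / 2048 = 1025, i.e. the size is rounded up
# to a whole number of hugepages.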
get_test_nr_hugepages_per_node() {
	local user_nodes=("$@")
	local _nr_hugepages=$nr_hugepages
	local _no_nodes=$no_nodes
	local -g nodes_test=()

	if ((${#user_nodes[@]} > 0)); then
		# Explicit node list - give each requested node the full allocation
		for _no_nodes in "${user_nodes[@]}"; do
			nodes_test[_no_nodes]=$nr_hugepages
		done
		return 0
	elif ((${#nodes_hp[@]} > 0)); then
		# Per-node allocations were provided by the caller (see custom_alloc())
		for _no_nodes in "${!nodes_hp[@]}"; do
			nodes_test[_no_nodes]=${nodes_hp[_no_nodes]}
		done
		return 0
	fi

	# Otherwise spread the allocation evenly across all nodes, with any
	# remainder going to the lower-numbered nodes
	while ((_no_nodes > 0)); do
		nodes_test[_no_nodes - 1]=$((_nr_hugepages / _no_nodes))
		: $((_nr_hugepages -= nodes_test[_no_nodes - 1]))
		: $((--_no_nodes))
	done
}
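# Worked example of the even split above: with nr_hugepages=1025 and 2 nodes,
# the loop first assigns nodes_test[1]=1025/2=512, leaving 513 pages, and then
# nodes_test[0]=513/1=513 - the remainder always lands on the lower-numbered
# nodes.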
verify_nr_hugepages() {
	local node
	local sorted_t
	local sorted_s
	local surp
	local resv
	local anon

	if [[ $(< /sys/kernel/mm/transparent_hugepage/enabled) != *"[never]"* ]]; then
		anon=$(get_meminfo AnonHugePages)
	fi

	surp=$(get_meminfo HugePages_Surp)
	resv=$(get_meminfo HugePages_Rsvd)

	echo "nr_hugepages=$nr_hugepages"
	echo "resv_hugepages=$resv"
	echo "surplus_hugepages=$surp"
	echo "anon_hugepages=${anon:-disabled}"

	(($(< "$default_huge_nr") == nr_hugepages + surp + resv))
	# This knob doesn't account for the surplus or reserved hugepages
	(($(< "$global_huge_nr") == nr_hugepages))
	(($(get_meminfo HugePages_Total) == nr_hugepages + surp + resv))

	get_nodes

	# Take global resv and per-node surplus hugepages into account
	for node in "${!nodes_test[@]}"; do
		((nodes_test[node] += resv))
		((nodes_test[node] += $(get_meminfo HugePages_Surp "$node")))
	done

	# There's no obvious way of determining which NUMA node is going to end
	# up with an odd number of hugepages in case such a number was actually
	# allocated by the kernel. Considering that, simply check whether our
	# expectation is met by sorting it and comparing it with the number of
	# hugepages that was actually allocated on each node.
	for node in "${!nodes_test[@]}"; do
		sorted_t[nodes_test[node]]=1 sorted_s[nodes_sys[node]]=1
		echo "node$node=${nodes_sys[node]} expecting ${nodes_test[node]}"
	done

	[[ ${!sorted_s[*]} == "${!sorted_t[*]}" ]]
}
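# The sorted_{t,s} trick above relies on bash expanding the keys of an indexed
# array in ascending order. Illustrative example: for nodes_test=(513 512) and
# nodes_sys=(512 513), both ${!sorted_t[*]} and ${!sorted_s[*]} expand to
# "512 513", so the per-node counts match as a set regardless of which node
# received the extra page.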
# Test cases
default_setup() {
	# Default HUGEMEM (2G) alloc on node0
	get_test_nr_hugepages $((2048 * 1024)) 0
	setup
	verify_nr_hugepages
}
per_node_2G_alloc() {
	# 2G alloc per node, i.e. N*2G in total
	local IFS=","
	get_test_nr_hugepages $((2048 * 1024)) "${!nodes_sys[@]}"
	NRHUGE=$nr_hugepages HUGENODE="${!nodes_sys[*]}" setup
	nr_hugepages=$((nr_hugepages * ${#nodes_sys[@]})) verify_nr_hugepages
}
even_2G_alloc() {
	# 2G alloc spread across N nodes
	get_test_nr_hugepages $((2048 * 1024))
	NRHUGE=$nr_hugepages HUGE_EVEN_ALLOC=yes setup
	verify_nr_hugepages
}
odd_alloc() {
	# Odd 2049 MB alloc across N nodes
	get_test_nr_hugepages $((2049 * 1024))
	HUGEMEM=2049 HUGE_EVEN_ALLOC=yes setup
	verify_nr_hugepages
}
custom_alloc() {
	# Custom alloc: node0 == 1GB [node1 == 2GB]
	local IFS=","
	local node
	local nodes_hp=()
	local nr_hugepages=0 _nr_hugepages=0

	get_test_nr_hugepages $((1024 * 1024))
	nodes_hp[0]=$nr_hugepages
	if ((${#nodes_sys[@]} > 1)); then
		get_test_nr_hugepages $((2048 * 1024))
		nodes_hp[1]=$nr_hugepages
	fi

	for node in "${!nodes_hp[@]}"; do
		HUGENODE+=("nodes_hp[$node]=${nodes_hp[node]}")
		((_nr_hugepages += nodes_hp[node]))
	done

	get_test_nr_hugepages_per_node
	HUGENODE="${HUGENODE[*]}" setup
	nr_hugepages=$_nr_hugepages verify_nr_hugepages
}
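# Illustrative example of the HUGENODE string built above: with 2048 kB
# default hugepages, node0 gets 512 pages (1GB) and node1 gets 1024 (2GB), so
# setup is invoked with HUGENODE="nodes_hp[0]=512,nodes_hp[1]=1024"
# (the local IFS="," joins the array elements).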
no_shrink_alloc() {
	# Default HUGEMEM (2G) alloc on node0,
	# then attempt to shrink it by half: 2G should remain
	get_test_nr_hugepages $((2048 * 1024)) 0

	# Verify the default allocation first
	setup
	verify_nr_hugepages

	# Now attempt to shrink the number of hugepages
	CLEAR_HUGE=no NRHUGE=$((nr_hugepages / 2)) setup

	# 2G worth of hugepages should remain
	verify_nr_hugepages
}
get_nodes
clear_hp
run_test "default_setup" default_setup
run_test "per_node_2G_alloc" per_node_2G_alloc
run_test "even_2G_alloc" even_2G_alloc
run_test "odd_alloc" odd_alloc
run_test "custom_alloc" custom_alloc
run_test "no_shrink_alloc" no_shrink_alloc
clear_hp