scripts/setup: Don't overwrite already allocated number of hugepages

Don't touch existing hugepage allocations when they already meet the
NRHUGE requirement. Introduce a new environment switch, SHRINK_HUGE, to
override this new behavior.

Signed-off-by: Michal Berger <michal.berger@intel.com>
Change-Id: I0cd124b98c3deb7c21b4fbd57529d0995978c2e5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14912
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Michal Berger 2022-10-11 09:27:39 +02:00 committed by Tomasz Zawadzki
parent c77b537786
commit 28bfb8763b
2 changed files with 27 additions and 0 deletions
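To make the intended behavior concrete, here is a rough usage sketch based on the environment switches touched by this change (the scripts/setup.sh entry point and the 2048-page starting state are illustrative assumptions, not part of the commit):

# Assume 2048 hugepages are already allocated system-wide.
sudo NRHUGE=1024 ./scripts/setup.sh                  # request <= current allocation: left untouched, 2048 remain
sudo NRHUGE=1024 SHRINK_HUGE=yes ./scripts/setup.sh  # shrinking explicitly allowed: allocation drops to 1024
sudo NRHUGE=4096 ./scripts/setup.sh                  # request exceeds current allocation: grows to 4096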


@@ -57,6 +57,9 @@ function usage() {
echo " 2048 pages for node0, 512 for node1 and default NRHUGE for node2."
echo "HUGEPGSZ Size of the hugepages to use in kB. If not set, kernel's default"
echo " setting is used."
echo "SHRINK_HUGE If set to 'yes', hugepages allocation won't be skipped in case"
echo " number of requested hugepages is lower from what's already"
echo " allocated. Doesn't apply when HUGE_EVEN_ALLOC is in use."
echo "CLEAR_HUGE If set to 'yes', the attempt to remove hugepages from all nodes will"
echo " be made prior to allocation".
echo "PCI_ALLOWED"
@@ -467,6 +470,13 @@ check_hugepages_alloc() {
    local hp_int=$1
    local allocated_hugepages
    allocated_hugepages=$(< "$hp_int")
    if ((NRHUGE <= allocated_hugepages)) && [[ $SHRINK_HUGE != yes ]]; then
        echo "INFO: Requested $NRHUGE hugepages but $allocated_hugepages already allocated ${2:+on node$2}"
        return 0
    fi
    echo $((NRHUGE < 0 ? 0 : NRHUGE)) > "$hp_int"
    allocated_hugepages=$(< "$hp_int")
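For context, $hp_int here is the sysfs nr_hugepages file handed in by the caller; the exact path depends on HUGEPGSZ and on whether a NUMA node id is passed as the second argument. A sketch of the kind of files typically involved (2048 kB pages and node0 are illustrative assumptions):

# System-wide count for 2 MB hugepages:
cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
# Per-node count, matching the "on node$2" wording in the INFO message above:
cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages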


@@ -186,6 +186,22 @@ custom_alloc() {
    nr_hugepages=$_nr_hugepages verify_nr_hugepages
}

no_shrink_alloc() {
    # Default HUGEMEM (2G) alloc on node0
    # attempt to shrink by half: 2G should remain
    get_test_nr_hugepages $((2048 * 1024)) 0

    # Verify the default first
    setup
    verify_nr_hugepages

    # Now attempt to shrink the hp number
    CLEAR_HUGE=no NRHUGE=$((nr_hugepages / 2)) setup

    # 2G should remain
    verify_nr_hugepages
}
get_nodes
clear_hp
@@ -194,5 +210,6 @@ run_test "per_node_2G_alloc" per_node_2G_alloc
run_test "even_2G_alloc" even_2G_alloc
run_test "odd_alloc" odd_alloc
run_test "custom_alloc" custom_alloc
run_test "no_shrink_alloc" no_shrink_alloc
clear_hp
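As a closing note, the arithmetic behind the no_shrink_alloc test added above, assuming the kernel's default 2 MB hugepage size (the page size is an assumption; the test itself only compares the requested and verified totals):

# 2 GB default HUGEMEM on node0 with 2 MB pages:
#   get_test_nr_hugepages $((2048 * 1024)) 0  ->  2 GB / 2 MB = 1024 pages expected
#   NRHUGE=$((nr_hugepages / 2))              ->  512 pages requested on the second setup call
# Since 512 < 1024 and SHRINK_HUGE is unset, the second setup call leaves the
# allocation alone and the final verify_nr_hugepages still sees the full 2 GB.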