From 28bfb8763bf72cea49497dadfe9342d6a1e9ad85 Mon Sep 17 00:00:00 2001
From: Michal Berger
Date: Tue, 11 Oct 2022 09:27:39 +0200
Subject: [PATCH] scripts/setup: Don't overwrite already allocated number of
 hugepages

Don't touch existing hugepage allocations when they already meet the
NRHUGE requirement. Introduce a new environment switch, SHRINK_HUGE, to
override that new behavior.

Signed-off-by: Michal Berger
Change-Id: I0cd124b98c3deb7c21b4fbd57529d0995978c2e5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14912
Tested-by: SPDK CI Jenkins
Reviewed-by: Tomasz Zawadzki
Reviewed-by: Jim Harris
---
 scripts/setup.sh        | 10 ++++++++++
 test/setup/hugepages.sh | 17 +++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/scripts/setup.sh b/scripts/setup.sh
index 2eba58113..8412e9568 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -57,6 +57,9 @@ function usage() {
 	echo "                 2048 pages for node0, 512 for node1 and default NRHUGE for node2."
 	echo "HUGEPGSZ         Size of the hugepages to use in kB. If not set, kernel's default"
 	echo "                 setting is used."
+	echo "SHRINK_HUGE      If set to 'yes', hugepages allocation won't be skipped in case"
+	echo "                 number of requested hugepages is lower than what's already"
+	echo "                 allocated. Doesn't apply when HUGE_EVEN_ALLOC is in use."
 	echo "CLEAR_HUGE       If set to 'yes', the attempt to remove hugepages from all nodes will"
 	echo "                 be made prior to allocation".
 	echo "PCI_ALLOWED"
@@ -467,6 +470,13 @@ check_hugepages_alloc() {
 	local hp_int=$1
 	local allocated_hugepages
 
+	allocated_hugepages=$(< "$hp_int")
+
+	if ((NRHUGE <= allocated_hugepages)) && [[ $SHRINK_HUGE != yes ]]; then
+		echo "INFO: Requested $NRHUGE hugepages but $allocated_hugepages already allocated ${2:+on node$2}"
+		return 0
+	fi
+
 	echo $((NRHUGE < 0 ? 0 : NRHUGE)) > "$hp_int"
 	allocated_hugepages=$(< "$hp_int")
 
diff --git a/test/setup/hugepages.sh b/test/setup/hugepages.sh
index d66216bb5..0411c6bdb 100755
--- a/test/setup/hugepages.sh
+++ b/test/setup/hugepages.sh
@@ -186,6 +186,22 @@ custom_alloc() {
 	nr_hugepages=$_nr_hugepages verify_nr_hugepages
 }
 
+no_shrink_alloc() {
+	# Default HUGEMEM (2G) alloc on node0,
+	# then attempt to shrink it by half: 2G should remain
+
+	get_test_nr_hugepages $((2048 * 1024)) 0
+
+	# Verify the default first
+	setup
+	verify_nr_hugepages
+
+	# Now attempt to shrink the hp number
+	CLEAR_HUGE=no NRHUGE=$((nr_hugepages / 2)) setup
+	# 2G should remain
+	verify_nr_hugepages
+}
+
 get_nodes
 clear_hp
 
@@ -194,5 +210,6 @@ run_test "per_node_2G_alloc" per_node_2G_alloc
 run_test "even_2G_alloc" even_2G_alloc
 run_test "odd_alloc" odd_alloc
 run_test "custom_alloc" custom_alloc
+run_test "no_shrink_alloc" no_shrink_alloc
 
 clear_hp
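
For illustration only: a minimal sketch of how the new behavior could be
exercised with scripts/setup.sh, assuming it is run from the SPDK repository
root. The page counts below are example values, not defaults required by this
patch.

    # First run: nothing allocated yet, so 2048 hugepages are reserved.
    sudo NRHUGE=2048 ./scripts/setup.sh

    # Second run requests fewer pages; with this patch the existing 2048 pages
    # are kept and an INFO message is printed instead of shrinking the pool.
    sudo NRHUGE=1024 ./scripts/setup.sh

    # SHRINK_HUGE=yes opts back into the previous behavior and rewrites the
    # allocation down to the requested number of pages.
    sudo NRHUGE=1024 SHRINK_HUGE=yes ./scripts/setup.sh

As the updated help text notes, the skip does not apply when HUGE_EVEN_ALLOC
is in use.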