From 19f0c9a0d06b00f380a649e5a837e4a10267a629 Mon Sep 17 00:00:00 2001
From: Michal Berger
Date: Thu, 29 Apr 2021 15:29:46 +0200
Subject: [PATCH] autotest: Replace fio.py with a bash wrapper in tests

fio.py is just a simple wrapper around fio and doesn't do anything
that actually requires Python. Having it in plain bash form makes it
easier to integrate with autotest's common sh tooling and to debug
any potential issues with the underlying tests.

This also fixes #1919 by making sure that only proper NVMe devices
are selected for the nvmf targets.

Fixes: #1919.

Signed-off-by: Michal Berger
Change-Id: I111d00df3c7b2517f431cae865e258a665c2ecb3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7684
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Tomasz Zawadzki
---
 scripts/fio-wrapper                           | 139 ++++++++++++++++++
 test/iscsi_tgt/digests/digests.sh             |   2 +-
 test/iscsi_tgt/fio/fio.sh                     |   2 +-
 test/iscsi_tgt/ip_migration/ip_migration.sh   |   2 +-
 .../login_redirection/login_redirection.sh    |   2 +-
 test/iscsi_tgt/lvol/iscsi_lvol.sh             |   2 +-
 .../multiconnection/multiconnection.sh        |   2 +-
 test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh |   2 +-
 test/iscsi_tgt/pmem/iscsi_pmem.sh             |   2 +-
 test/iscsi_tgt/qos/qos.sh                     |   2 +-
 test/iscsi_tgt/rbd/rbd.sh                     |   2 +-
 test/iscsi_tgt/reset/reset.sh                 |   2 +-
 test/iscsi_tgt/trace_record/trace_record.sh   |   2 +-
 test/nvmf/target/fio.sh                       |  10 +-
 test/nvmf/target/initiator_timeout.sh         |   2 +-
 test/nvmf/target/multiconnection.sh           |   4 +-
 test/nvmf/target/multipath.sh                 |   4 +-
 test/nvmf/target/nmic.sh                      |   2 +-
 test/nvmf/target/srq_overwhelm.sh             |   2 +-
 19 files changed, 163 insertions(+), 24 deletions(-)
 create mode 100755 scripts/fio-wrapper

diff --git a/scripts/fio-wrapper b/scripts/fio-wrapper
new file mode 100755
index 000000000..34030c195
--- /dev/null
+++ b/scripts/fio-wrapper
@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+rootdir=$(readlink -f "$(dirname "$0")/../")
+
+shopt -s nullglob extglob
+
+fio_config() {
+	local devs=("$@") dev
+
+	cat <<- FIO
+		[global]
+		thread=1
+		invalidate=1
+		rw=$testtype
+		time_based=1
+		runtime=$runtime
+		ioengine=libaio
+		direct=1
+		bs=$blocksize
+		iodepth=$iodepth
+		norandommap=$((verify == 1 ? 0 : 1))
+		numjobs=$numjobs
+		verify_dump=1
+	FIO
+
+	if ((verify == 1)); then
+		cat <<- FIO
+			do_verify=$verify
+			verify=crc32c-intel
+		FIO
+	fi
+
+	for dev in "${!devs[@]}"; do
+		cat <<- FIO
+			[job$dev]
+			filename=/dev/${devs[dev]}
+		FIO
+	done
+}
+
+run_fio() {
+	fio_config "$@" | fio -
+}
+
+get_iscsi() {
+	while read -r; do
+		[[ $REPLY =~ "Attached scsi disk "(sd[a-z]+) ]] && echo "${BASH_REMATCH[1]}"
+	done < <(iscsiadm -m session -P 3)
+}
+
+get_nvme() {
+	local blocks nvme nvme_sub
+
+	for nvme in /sys/class/nvme/nvme+([0-9]); do
+		# Make sure we touch only the block devices which belong to bdev subsystem and
+		# use supported protocols.
+		[[ $(< "$nvme/transport") == tcp || $(< "$nvme/transport") == rdma ]] || continue
+		for nvme_sub in /sys/class/nvme-subsystem/nvme-subsys+([0-9]); do
+			[[ -e $nvme_sub/${nvme##*/} ]] || continue
+			[[ $(< "$nvme_sub/model") == "SPDK bdev Controller"* ]] || continue
+			blocks+=("$nvme_sub/${nvme##*/}"n*)
+		done
+	done
+	printf '%s\n' "${blocks[@]##*/}"
+}
+
+get_devices() {
+	local devs=("$@")
+
+	if ((${#devs[@]} == 0)); then
+		case "$protocol" in
+			iscsi) devs=($(get_iscsi)) ;;
+			nvmf) devs=($(get_nvme)) ;;
+			*) ;;
+		esac
+	fi
+	printf '%s\n' "${devs[@]}"
+}
+
+configure_devices() {
+	local devs=("$@") dev qd
+
+	for dev in "${devs[@]}"; do
+		qd=128
+		# Disable all merge tries
+		echo 2 > "/sys/block/$dev/queue/nomerges"
+		# FIXME: nr_requests already has its default value at 128. Also, when no
+		# scheduler is associated with the device this value cannot be changed
+		# and is automatically adjusted as well.
+		# echo 128 > "/sys/block/$dev/queue/nr_requests"
+		if [[ -e /sys/block/$dev/device/queue_depth ]]; then
+			# FIXME: Is this really needed though? Can't we use the default? This is not
+			# very deterministic as depending on the device we may end up with different
+			# qd in the range of 1-128.
+			while ((qd > 0)) && ! echo "$qd" > "/sys/block/$dev/device/queue_depth"; do
+				((--qd))
+			done 2> /dev/null
+			if ((qd == 0)); then
+				printf 'Failed to set queue_depth (%s)\n' "$dev"
+				return 1
+			fi
+			printf 'queue_depth set to %u (%s)\n' "$qd" "$dev"
+		else
+			printf 'Could not set queue depth (%s)\n' "$dev" >&2
+		fi
+		echo none > "/sys/block/$dev/queue/scheduler"
+	done
+}
+
+# Defaults
+blocksize=4096
+iodepth=1
+numjobs=1
+protocol="nvmf"
+runtime=1
+testtype="read"
+verify=0
+
+# Keep short args compatible with fio.py
+while getopts :i:d:n:p:r:t:v arg; do
+	case "$arg" in
+		i) blocksize=$OPTARG ;;
+		d) iodepth=$OPTARG ;;
+		n) numjobs=$OPTARG ;;
+		p) protocol=$OPTARG ;;
+		r) runtime=$OPTARG ;;
+		t) testtype=$OPTARG ;;
+		v) verify=1 ;;
+		*) ;;
+	esac
+done
+shift $((OPTIND - 1))
+
+devices=($(get_devices "$@"))
+if ((${#devices[@]} == 0)); then
+	printf '* No devices were found for the test, aborting\n' >&2
+	exit 1
+fi
+
+fio_config "${devices[@]}"
+configure_devices "${devices[@]}" && run_fio "${devices[@]}"
diff --git a/test/iscsi_tgt/digests/digests.sh b/test/iscsi_tgt/digests/digests.sh
index 0d46c5dbb..6bbedd3e3 100755
--- a/test/iscsi_tgt/digests/digests.sh
+++ b/test/iscsi_tgt/digests/digests.sh
@@ -47,7 +47,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/iscsi_tgt/fio/fio.sh b/test/iscsi_tgt/fio/fio.sh
index dc072620f..4710de1f6 100755
--- a/test/iscsi_tgt/fio/fio.sh
+++ b/test/iscsi_tgt/fio/fio.sh
@@ -56,7 +56,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=4096
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/iscsi_tgt/ip_migration/ip_migration.sh b/test/iscsi_tgt/ip_migration/ip_migration.sh
index 402f676f7..44dd766ec 100755
--- a/test/iscsi_tgt/ip_migration/ip_migration.sh
+++ b/test/iscsi_tgt/ip_migration/ip_migration.sh
@@ -8,7 +8,7 @@ source $rootdir/test/iscsi_tgt/common.sh
 iscsitestinit
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 source "$rootdir/test/common/applications.sh"
 
 NETMASK=127.0.0.0/24
diff --git a/test/iscsi_tgt/login_redirection/login_redirection.sh b/test/iscsi_tgt/login_redirection/login_redirection.sh
index 824eb2e64..c9fb998cb 100755
--- a/test/iscsi_tgt/login_redirection/login_redirection.sh
+++ b/test/iscsi_tgt/login_redirection/login_redirection.sh
@@ -11,7 +11,7 @@ NULL_BDEV_SIZE=64
 NULL_BLOCK_SIZE=512
 
 rpc_py=$rootdir/scripts/rpc.py
-fio_py=$rootdir/scripts/fio.py
+fio_py=$rootdir/scripts/fio-wrapper
 
 rpc_addr1="/var/tmp/spdk0.sock"
 rpc_addr2="/var/tmp/spdk1.sock"
diff --git a/test/iscsi_tgt/lvol/iscsi_lvol.sh b/test/iscsi_tgt/lvol/iscsi_lvol.sh
index ad975c636..39ff87a41 100755
--- a/test/iscsi_tgt/lvol/iscsi_lvol.sh
+++ b/test/iscsi_tgt/lvol/iscsi_lvol.sh
@@ -18,7 +18,7 @@ else
 fi
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/iscsi_tgt/multiconnection/multiconnection.sh b/test/iscsi_tgt/multiconnection/multiconnection.sh
index 09cc9a2fb..a1fc43933 100755
--- a/test/iscsi_tgt/multiconnection/multiconnection.sh
+++ b/test/iscsi_tgt/multiconnection/multiconnection.sh
@@ -8,7 +8,7 @@ source $rootdir/test/iscsi_tgt/common.sh
 iscsitestinit
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 CONNECTION_NUMBER=30
 
diff --git a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
index 65a2a1681..47aa5a595 100755
--- a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+++ b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
@@ -10,7 +10,7 @@ nvmftestinit
 iscsitestinit
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 # Namespaces are NOT used here on purpose. Rxe_cfg utilility used for NVMf tests do not support namespaces.
 TARGET_IP=127.0.0.1
diff --git a/test/iscsi_tgt/pmem/iscsi_pmem.sh b/test/iscsi_tgt/pmem/iscsi_pmem.sh
index da6fd77f4..5765bedfe 100755
--- a/test/iscsi_tgt/pmem/iscsi_pmem.sh
+++ b/test/iscsi_tgt/pmem/iscsi_pmem.sh
@@ -13,7 +13,7 @@ PMEM_BLOCK_SIZE=512
 TGT_NR=10
 PMEM_PER_TGT=1
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_target
 "${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
diff --git a/test/iscsi_tgt/qos/qos.sh b/test/iscsi_tgt/qos/qos.sh
index 6690c1549..31092d5f3 100755
--- a/test/iscsi_tgt/qos/qos.sh
+++ b/test/iscsi_tgt/qos/qos.sh
@@ -53,7 +53,7 @@ MALLOC_BLOCK_SIZE=512
 IOPS_RESULT=
 BANDWIDTH_RESULT=
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/iscsi_tgt/rbd/rbd.sh b/test/iscsi_tgt/rbd/rbd.sh
index 7ab0e0352..8a2a9a702 100755
--- a/test/iscsi_tgt/rbd/rbd.sh
+++ b/test/iscsi_tgt/rbd/rbd.sh
@@ -13,7 +13,7 @@ trap 'rbd_cleanup; exit 1' SIGINT SIGTERM EXIT
 timing_exit rbd_setup
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/iscsi_tgt/reset/reset.sh b/test/iscsi_tgt/reset/reset.sh
index 7b1d8ada7..5ed96eea8 100755
--- a/test/iscsi_tgt/reset/reset.sh
+++ b/test/iscsi_tgt/reset/reset.sh
@@ -11,7 +11,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 if ! hash sg_reset; then
 	exit 1
diff --git a/test/iscsi_tgt/trace_record/trace_record.sh b/test/iscsi_tgt/trace_record/trace_record.sh
index 7e13838ba..5a411e3ae 100755
--- a/test/iscsi_tgt/trace_record/trace_record.sh
+++ b/test/iscsi_tgt/trace_record/trace_record.sh
@@ -31,7 +31,7 @@ MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=4096
 
 rpc_py="$rootdir/scripts/rpc.py"
-fio_py="$rootdir/scripts/fio.py"
+fio_py="$rootdir/scripts/fio-wrapper"
 
 timing_enter start_iscsi_tgt
 
diff --git a/test/nvmf/target/fio.sh b/test/nvmf/target/fio.sh
index 4e98d7083..d50853155 100755
--- a/test/nvmf/target/fio.sh
+++ b/test/nvmf/target/fio.sh
@@ -35,15 +35,15 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 
 waitforserial $NVMF_SERIAL 3
 
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t write -r 1 -v
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t randwrite -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randwrite -r 1 -v
 
 sync
 
 #start hotplug test case
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t read -r 10 &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t read -r 10 &
 fio_pid=$!
 
 sleep 3
diff --git a/test/nvmf/target/initiator_timeout.sh b/test/nvmf/target/initiator_timeout.sh
index 199983be5..12c759c64 100755
--- a/test/nvmf/target/initiator_timeout.sh
+++ b/test/nvmf/target/initiator_timeout.sh
@@ -30,7 +30,7 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 waitforserial "$NVMF_SERIAL"
 
 # Once our timed out I/O complete, we will still have 10 sec of I/O.
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 60 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 60 -v &
 fio_pid=$!
 
 sleep 3
diff --git a/test/nvmf/target/multiconnection.sh b/test/nvmf/target/multiconnection.sh
index d7e490861..9befe2543 100755
--- a/test/nvmf/target/multiconnection.sh
+++ b/test/nvmf/target/multiconnection.sh
@@ -37,8 +37,8 @@ for i in $(seq 1 $NVMF_SUBSYS); do
 	waitforserial SPDK$i
 done
 
-$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t read -r 10
-$rootdir/scripts/fio.py -p nvmf -i 262144 -d 64 -t randwrite -r 10
+$rootdir/scripts/fio-wrapper -p nvmf -i 262144 -d 64 -t read -r 10
+$rootdir/scripts/fio-wrapper -p nvmf -i 262144 -d 64 -t randwrite -r 10
 sync
 
 for i in $(seq 1 $NVMF_SUBSYS); do
diff --git a/test/nvmf/target/multipath.sh b/test/nvmf/target/multipath.sh
index 8594b5226..2ed09be7e 100755
--- a/test/nvmf/target/multipath.sh
+++ b/test/nvmf/target/multipath.sh
@@ -50,7 +50,7 @@ ctrl2_id=$(nvme list-subsys | sed -n "s/traddr=$NVMF_SECOND_TARGET_IP trsvcid=$N
 # Set IO policy to numa
 echo numa > /sys/class/nvme-subsystem/nvme-subsys$subsys_id/iopolicy
 
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
 fio_pid=$!
 
 sleep 1
@@ -84,7 +84,7 @@ sleep 1
 # Set IO policy to round-robin
 echo round-robin > /sys/class/nvme-subsystem/nvme-subsys$subsys_id/iopolicy
 
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 128 -t randrw -r 6 -v &
 fio_pid=$!
 
 sleep 1
diff --git a/test/nvmf/target/nmic.sh b/test/nvmf/target/nmic.sh
index f8501343d..dd0b07479 100755
--- a/test/nvmf/target/nmic.sh
+++ b/test/nvmf/target/nmic.sh
@@ -42,7 +42,7 @@ nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_
 
 waitforserial "$NVMF_SERIAL"
 
-$rootdir/scripts/fio.py -p nvmf -i 4096 -d 1 -t write -r 1 -v
+$rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v
 
 nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
diff --git a/test/nvmf/target/srq_overwhelm.sh b/test/nvmf/target/srq_overwhelm.sh
index 98af97aab..832cbce08 100755
--- a/test/nvmf/target/srq_overwhelm.sh
+++ b/test/nvmf/target/srq_overwhelm.sh
@@ -31,7 +31,7 @@ done
 
 # working even at very high queue depths because the rdma qpair doesn't fail.
 # It is normal to see the initiator timeout and reconnect waiting for completions from an overwhelmmed target,
 # but the connection should come up and FIO should complete without errors.
-$rootdir/scripts/fio.py -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
+$rootdir/scripts/fio-wrapper -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13
 
 sync
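
For illustration, a minimal sketch of what the new wrapper does with one of the
invocations above (taken from test/nvmf/target/fio.sh), assuming get_nvme()
matched exactly one namespace and that its name is nvme0n1 (a hypothetical
device name, not something guaranteed by the patch):

    # invocation as used by the nvmf fio test; nvme0n1 below is hypothetical
    $rootdir/scripts/fio-wrapper -p nvmf -i 4096 -d 1 -t write -r 1 -v

fio_config() would then print roughly the following job file, which run_fio()
pipes straight into "fio -" after configure_devices() has tuned the block
device sysfs knobs:

    [global]
    thread=1
    invalidate=1
    rw=write
    time_based=1
    runtime=1
    ioengine=libaio
    direct=1
    bs=4096
    iodepth=1
    norandommap=0
    numjobs=1
    verify_dump=1
    do_verify=1
    verify=crc32c-intel
    [job0]
    ; nvme0n1 is a hypothetical example device matched by get_nvme()
    filename=/dev/nvme0n1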