test/nvme_perf: remove --disk-no option from perf script

The script was written so that tests started with all NVMe disks
found in the configuration, and the number of disks used for the
workload was then reduced in a loop until it reached 1. This is
the scenario used in test case 1, described in the SPDK NVMe
Performance Benchmark document.
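
For reference, the removed loop (sketched here from the second diff
hunk below; run_single_workload is a hypothetical placeholder for
the full fio/bdevperf/nvmeperf pass) stepped down two disks per
iteration:

    for ((k = DISKNO; k >= 1; k -= 2)); do
        run_single_workload "$k" # hypothetical helper: run the workload on $k disks
    done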

--disk-no made it possible to skip the loop and specify a single
value for the number of disks to be tested.
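
The removed handling (taken from the hunks below) set DISKNO from
the option and raised a ONEWORKLOAD flag that broke out of the
per-disk loop after the first pass:

    disk-no=*)
        DISKNO="${OPTARG#*=}"
        ONEWORKLOAD=true
        ;;

    # ...and inside the per-disk loop:
    if $ONEWORKLOAD; then
        break
    fi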

Neither the loop logic nor --disk-no is required any longer, as
tests are scheduled and run via the CI system, which picks the
parameters for each test run.
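
With this change a CI job pins the disk count directly through the
remaining options, for example (script path and values are
illustrative only):

    ./run_perf.sh --max-disk=4 --driver=kernel-libaio --cpu-allowed=0,1 --repeat-no=3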

Change-Id: Iad0f4d0d259ac64680fd5bb69d7cbb7e0afff79c
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3295
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Karol Latecki <karol.latecki@intel.com>, 2020-07-10 13:27:25 +02:00
Committed by: Tomasz Zawadzki
parent 0655f92a0f
commit 20995d370b
2 changed files with 92 additions and 116 deletions

Changed file 1 of 2:

@@ -32,7 +32,6 @@ DISKNO=1
 CPUS_ALLOWED=1
 NOIOSCALING=false
 PRECONDITIONING=true
-ONEWORKLOAD=false
 DATE="$(date +'%m_%d_%Y_%H%M%S')"
 
 function discover_bdevs() {
@@ -456,8 +455,6 @@ function usage() {
     echo " - kernel-hybrid-polling"
     echo " - kernel-libaio"
     echo " - kernel-io-uring"
-    echo " --disk-no=INT,ALL Number of disks to test on, this will run one workload on selected number od disks,"
-    echo " it discards max-disk setting, if =ALL then test on all found disk. [default=$DISKNO]"
     echo " --max-disk=INT,ALL Number of disks to test on, this will run multiple workloads with increasing number of disk each run."
     echo " If =ALL then test on all found disk. [default=$DISKNO]"
     echo " --cpu-allowed=INT Comma-separated list of CPU cores used to run the workload. [default=$CPUS_ALLOWED]"
@@ -484,10 +481,6 @@ while getopts 'h-:' optchar; do
     repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;;
     fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
     driver=*) PLUGIN="${OPTARG#*=}" ;;
-    disk-no=*)
-        DISKNO="${OPTARG#*=}"
-        ONEWORKLOAD=true
-        ;;
     max-disk=*) DISKNO="${OPTARG#*=}" ;;
     cpu-allowed=*) CPUS_ALLOWED="${OPTARG#*=}" ;;
     no-preconditioning) PRECONDITIONING=false ;;

Changed file 2 of 2:

@@ -97,127 +97,110 @@ printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZ
 echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
 #Run each workolad $REPEAT_NO times
 for ((j = 0; j < REPEAT_NO; j++)); do
-    #Start with $DISKNO disks and remove 2 disks for each run to avoid preconditioning before each run.
-    for ((k = DISKNO; k >= 1; k -= 2)); do
     cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio
     echo "" >> $BASE_DIR/config.fio
     #The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
     #Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
     if [[ "$PLUGIN" =~ "spdk-plugin" ]] && [[ "$NOIOSCALING" = false ]]; then
-        qd=$((IODEPTH * k))
+        qd=$((IODEPTH * DISKNO))
     else
         qd=$IODEPTH
     fi
     if [ $PLUGIN = "spdk-perf-bdev" ]; then
         run_bdevperf > $NVME_FIO_RESULTS
-        iops_disks[$k]=$((${iops_disks[$k]} + $(get_bdevperf_results iops)))
-        bw[$k]=$((${bw[$k]} + $(get_bdevperf_results bw_Kibs)))
+        iops_disks=$((iops_disks + $(get_bdevperf_results iops)))
+        bw=$((bw + $(get_bdevperf_results bw_Kibs)))
         cp $NVME_FIO_RESULTS $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
     elif [ $PLUGIN = "spdk-perf-nvme" ]; then
-        run_nvmeperf $k > $NVME_FIO_RESULTS
+        run_nvmeperf $DISKNO > $NVME_FIO_RESULTS
         read -r iops bandwidth mean_lat min_lat max_lat <<< $(get_nvmeperf_results)
-        iops_disks[$k]=$((${iops_disks[$k]} + iops))
-        bw[$k]=$((${bw[$k]} + bandwidth))
-        mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} + mean_lat))
-        min_lat_disks_usec[$k]=$((${min_lat_disks_usec[$k]} + min_lat))
-        max_lat_disks_usec[$k]=$((${max_lat_disks_usec[$k]} + max_lat))
+        iops_disks=$((iops_disks + iops))
+        bw=$((bw + bandwidth))
+        mean_lat_disks_usec=$((mean_lat_disks_usec + mean_lat))
+        min_lat_disks_usec=$((min_lat_disks_usec + min_lat))
+        max_lat_disks_usec=$((max_lat_disks_usec + max_lat))
         cp $NVME_FIO_RESULTS $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
     else
         desc="Running Test: Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH} io_plugin/driver=$PLUGIN"
         cat <<- EOF >> $BASE_DIR/config.fio
             rw=$RW
             rwmixread=$MIX
             iodepth=$qd
             bs=$BLK_SIZE
             runtime=$RUNTIME
             ramp_time=$RAMP_TIME
             numjobs=$NUMJOBS
             time_based=1
             description=$desc
             log_avg_msec=250
         EOF
-        create_fio_config $k $PLUGIN "$DISK_NAMES" "$DISKS_NUMA" "$CORES"
+        create_fio_config $DISKNO $PLUGIN "$DISK_NAMES" "$DISKS_NUMA" "$CORES"
         echo "USING CONFIG:"
         cat $BASE_DIR/config.fio
         if [[ "$PLUGIN" =~ "spdk-plugin" ]]; then
             run_spdk_nvme_fio $PLUGIN "--output=$NVME_FIO_RESULTS" \
                 "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
         else
             run_nvme_fio $fio_ioengine_opt "--output=$NVME_FIO_RESULTS" \
                 "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
         fi
         #Store values for every number of used disks
         #Use recalculated value for mixread param in case rw mode is not rw.
         rwmixread=$MIX
         if [[ $RW = *"read"* ]]; then
             rwmixread=100
         elif [[ $RW = *"write"* ]]; then
             rwmixread=0
         fi
-        iops_disks[$k]=$((iops_disks[k] + $(get_results iops $rwmixread)))
-        mean_lat_disks_usec[$k]=$((mean_lat_disks_usec[k] + $(get_results mean_lat_usec $rwmixread)))
-        p99_lat_disks_usec[$k]=$((p99_lat_disks_usec[k] + $(get_results p99_lat_usec $rwmixread)))
-        p99_99_lat_disks_usec[$k]=$((p99_99_lat_disks_usec[k] + $(get_results p99_99_lat_usec $rwmixread)))
-        stdev_disks_usec[$k]=$((stdev_disks_usec[k] + $(get_results stdev_usec $rwmixread)))
-        mean_slat_disks_usec[$k]=$((mean_slat_disks_usec[k] + $(get_results mean_slat_usec $rwmixread)))
-        mean_clat_disks_usec[$k]=$((mean_clat_disks_usec[k] + $(get_results mean_clat_usec $rwmixread)))
-        bw[$k]=$((bw[k] + $(get_results bw_Kibs $rwmixread)))
+        iops_disks=$((iops_disks + $(get_results iops $rwmixread)))
+        mean_lat_disks_usec=$((mean_lat_disks_usec + $(get_results mean_lat_usec $rwmixread)))
+        p99_lat_disks_usec=$((p99_lat_disks_usec + $(get_results p99_lat_usec $rwmixread)))
+        p99_99_lat_disks_usec=$((p99_99_lat_disks_usec + $(get_results p99_99_lat_usec $rwmixread)))
+        stdev_disks_usec=$((stdev_disks_usec + $(get_results stdev_usec $rwmixread)))
+        mean_slat_disks_usec=$((mean_slat_disks_usec + $(get_results mean_slat_usec $rwmixread)))
+        mean_clat_disks_usec=$((mean_clat_disks_usec + $(get_results mean_clat_usec $rwmixread)))
+        bw=$((bw + $(get_results bw_Kibs $rwmixread)))
         cp $NVME_FIO_RESULTS $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.json
         cp $BASE_DIR/config.fio $result_dir/config_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.fio
         rm -f $BASE_DIR/config.fio
     fi
-    #if tested on only one number of disk
-    if $ONEWORKLOAD; then
-        break
-    fi
-    done
 done
 #Write results to csv file
-for ((k = DISKNO; k >= 1; k -= 2)); do
-    iops_disks[$k]=$((${iops_disks[$k]} / REPEAT_NO))
-    if [[ "$PLUGIN" =~ "plugin" ]]; then
-        mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / REPEAT_NO))
-        p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} / REPEAT_NO))
-        p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} / REPEAT_NO))
-        stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} / REPEAT_NO))
-        mean_slat_disks_usec[$k]=$((${mean_slat_disks_usec[$k]} / REPEAT_NO))
-        mean_clat_disks_usec[$k]=$((${mean_clat_disks_usec[$k]} / REPEAT_NO))
-    elif [[ "$PLUGIN" == "spdk-perf-bdev" ]]; then
-        mean_lat_disks_usec[$k]=0
-        p99_lat_disks_usec[$k]=0
-        p99_99_lat_disks_usec[$k]=0
-        stdev_disks_usec[$k]=0
-        mean_slat_disks_usec[$k]=0
-        mean_clat_disks_usec[$k]=0
-    elif [[ "$PLUGIN" == "spdk-perf-nvme" ]]; then
-        mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / REPEAT_NO))
-        p99_lat_disks_usec[$k]=0
-        p99_99_lat_disks_usec[$k]=0
-        stdev_disks_usec[$k]=0
-        mean_slat_disks_usec[$k]=0
-        mean_clat_disks_usec[$k]=0
-    fi
-    bw[$k]=$((${bw[$k]} / REPEAT_NO))
-    printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]} \
-        ${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file
-    #if tested on only one numeber of disk
-    if $ONEWORKLOAD; then
-        break
-    fi
-done
+iops_disks=$((iops_disks / REPEAT_NO))
+bw=$((bw / REPEAT_NO))
+if [[ "$PLUGIN" =~ "plugin" ]]; then
+    mean_lat_disks_usec=$((mean_lat_disks_usec / REPEAT_NO))
+    p99_lat_disks_usec=$((p99_lat_disks_usec / REPEAT_NO))
+    p99_99_lat_disks_usec=$((p99_99_lat_disks_usec / REPEAT_NO))
+    stdev_disks_usec=$((stdev_disks_usec / REPEAT_NO))
+    mean_slat_disks_usec=$((mean_slat_disks_usec / REPEAT_NO))
+    mean_clat_disks_usec=$((mean_clat_disks_usec / REPEAT_NO))
+elif [[ "$PLUGIN" == "spdk-perf-bdev" ]]; then
+    mean_lat_disks_usec=0
+    p99_lat_disks_usec=0
+    p99_99_lat_disks_usec=0
+    stdev_disks_usec=0
+    mean_slat_disks_usec=0
+    mean_clat_disks_usec=0
+elif [[ "$PLUGIN" == "spdk-perf-nvme" ]]; then
+    mean_lat_disks_usec=$((mean_lat_disks_usec / REPEAT_NO))
+    p99_lat_disks_usec=0
+    p99_99_lat_disks_usec=0
+    stdev_disks_usec=0
+    mean_slat_disks_usec=0
+    mean_clat_disks_usec=0
+fi
+printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${DISKNO} ${iops_disks} ${mean_lat_disks_usec} ${p99_lat_disks_usec} \
+    ${p99_99_lat_disks_usec} ${stdev_disks_usec} ${mean_slat_disks_usec} ${mean_clat_disks_usec} ${bw} >> $result_file
 if [ $PLUGIN = "kernel-io-uring" ]; then
     # Reload the nvme driver so that other test runs are not affected