test/nvme: Add TC3 and TC4 to NVME performance test.
NVMe latency tests: IOPS vs. latency at different queue depths.

Signed-off-by: Pawel Niedzwiecki <pawelx.niedzwiecki@intel.com>
Change-Id: I1192a18fa05f36c74385d286e86db571d52b9224
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/424063
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: John Kariuki <John.K.Kariuki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 3856d82b50
commit 550d1b2e24

test/nvme/perf/README.md (new file, 76 lines)
@@ -0,0 +1,76 @@
# Automated script for NVMe performance test

## Configuration

The test is configured using command-line options.

### Available options

#### -h, --help

Prints available commands and help.

#### --run-time

Tell fio to terminate processing after the specified period of time. Value in seconds.

#### --ramp-time

Fio will run the specified workload for this amount of time before logging any performance numbers.
Value in seconds.

#### --fio-bin

Path to fio binary.

#### --driver

Select between the SPDK driver and the Linux kernel driver. The Linux kernel driver has three configurations:
Default mode, Hybrid Polling and Classic Polling. The SPDK driver supports 2 fio_plugin modes: bdev and NVMe PMD.
Before running a test with SPDK, you will need to bind NVMe devices to the Linux uio_pci_generic or vfio-pci driver.
When running a test with the kernel driver, NVMe devices use the kernel driver. The 5 valid values for this option are:
'bdev', 'nvme', 'kernel-libaio', 'kernel-classic-polling' and 'kernel-hybrid-polling'.
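
For illustration, a typical pair of invocations might look like the sketch below (the script path follows the example later in this changeset; adjust it and the option values to your checkout and hardware):

```bash
# SPDK run: reserve hugepages and bind NVMe devices to uio_pci_generic/vfio-pci first.
sudo HUGEMEM=8192 ./spdk/scripts/setup.sh
sudo ./spdk/test/perf/run_perf.sh --driver=bdev --run-time=300 --ramp-time=60

# Kernel driver run: return the devices to the kernel NVMe driver first.
sudo ./spdk/scripts/setup.sh reset
sudo ./spdk/test/perf/run_perf.sh --driver=kernel-libaio --run-time=300 --ramp-time=60
```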

#### --max-disk

This option will run multiple fio jobs with a varying number of NVMe devices. First it will start with the
max-disk number of devices, then decrease the number of disks by two until there are no more devices.
If set to 'all' then max-disk will be set to all available devices.
Only one of the max-disk or disk-no options can be used.

#### --disk-no

This option will run the fio job on the specified number of NVMe devices. If set to 'all' then it
will be set to all available devices. Only one of the max-disk or disk-no options can be used.

#### --cpu-allowed

Specifies the CPU cores that will be used by fio to execute the performance test cases. When the SPDK driver is
chosen, the script attempts to assign NVMe devices to CPU cores on the same NUMA node. The script will first try to
align each core with devices matching the core's NUMA node, but if there are no devices left within the core's NUMA
node it will use devices from the other NUMA node. It is important to choose cores that will ensure the best NUMA
node alignment. For example: on a system with 8 devices on NUMA node 0 and 8 devices on NUMA node 1, cores 0-27 on
NUMA node 0 and cores 28-55 on NUMA node 1, if the test is set to use 16 disks and four cores then
"--cpu-allowed=1,2,28,29" can be used, resulting in 4 node 0 devices per core on cores 1 and 2, and 4 node 1 devices
per core on cores 28 and 29. If 10 cores are required then the best option would be
"--cpu-allowed=1,2,3,4,28,29,30,31,32,33" because cores 1-4 will be aligned with 2 devices on node 0 per core,
cores 28-29 with 2 devices on node 1 per core and cores 30-33 with 1 device on node 1 per core.
If the kernel driver is chosen then for each job with an NVMe device, all CPU cores on the corresponding NUMA node are picked.
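
To pick a good core list, it helps to look at the NUMA layout first. A minimal sketch (it reads the same sysfs attribute the test script uses):

```bash
# Which CPU cores belong to which NUMA node.
lscpu | grep "NUMA node"

# NUMA node of every NVMe controller in the system.
for ctrl in /sys/class/nvme/nvme*; do
    echo "$(basename "$ctrl"): NUMA node $(cat "$ctrl/device/numa_node")"
done
```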

#### --rw

Type of I/O pattern. Accepted values are: randrw, rw.

#### --rwmixread

Percentage of a mixed workload that should be reads.

#### --iodepth

Number of I/O units to keep in flight against each file.

#### --block-size

The block size in bytes used for I/O units.

#### --numjobs

Create the specified number of clones of a job.

#### --repeat-no

Specifies how many times to run each workload. End results are averages of these runs.

#### --no-preconditioning

By default disks are preconditioned before the test using fio with parameters: size=100%, loops=2, bs=1M, rw=write,
iodepth=32, ioengine=spdk. Preconditioning can be skipped when this option is set.
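
For reference, the generated preconditioning job is roughly equivalent to the sketch below (illustrative only: the traddr value is a made-up example and the real config.fio is produced by the script):

```bash
# Append a preconditioning section to a fio config file (SPDK NVMe fio_plugin filename syntax).
cat >> config.fio <<'EOF'
[preconditioning]
ioengine=spdk
size=100%
loops=2
bs=1M
rw=write
iodepth=32
filename=trtype=PCIe traddr=0000.01.00.0 ns=1
EOF
```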

#### --no-io-scaling

For the SPDK fio_plugin, iodepth is multiplied by the number of devices handled by a job. When this option is set,
this multiplication is disabled.
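
As a concrete example of the default scaling (mirroring the queue depth calculation inside the test script):

```bash
# With --iodepth=256 and 4 devices submitted from a single fio job, the effective
# per-job queue depth becomes 256 * 4 = 1024 unless --no-io-scaling is given.
IODEPTH=256
DEVICES=4
qd=$(( IODEPTH * DEVICES ))
echo "$qd"   # prints 1024
```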

## Results

Results are stored in the "results" folder. After each workload, the following files are copied to this folder:
the fio configuration file, JSON files with fio results, and latency logs sampled at a 250 ms interval.
The number of copied files depends on the number of repeats of each workload. Additionally, a csv file is created
with the averaged results of all workloads.
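
The summary csv begins with the run parameters and is followed by one averaged row per tested number of disks, roughly like the sketch below (all values are made up for illustration):

```
run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workload-mix
600,60,bdev,256,4096,4,randrw,70
num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]
4,1320000,775,1020,1650,110,8,760,5280000
```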
@@ -17,12 +17,13 @@ RAMP_TIME=30
BLK_SIZE=4096
RW=randrw
MIX=100
TYPE=("randread" "randrw" "randwrite")
IODEPTH=256
DISKNO=1
ONEWORKLOAD=false
CPUS_ALLOWED=1
NUMJOBS=1
REPEAT_NO=3
NOIOSCALING=false

function get_cores(){
local cpu_list="$1"
@@ -55,6 +56,10 @@ function get_numa_node(){
local bdev_bdf=$(jq -r ".[] | select(.name==\"$name\").driver_specific.nvme.pci_address" <<< $bdevs)
echo $(cat /sys/bus/pci/devices/$bdev_bdf/numa_node)
done
else
for bdf in $(iter_pci_class_code 01 08 02); do
echo $(cat /sys/bus/pci/devices/$bdf/numa_node)
done
fi
}

@@ -70,6 +75,10 @@ function get_disks(){
elif [ "$plugin" = "bdev" ]; then
local bdevs=$(discover_bdevs $ROOT_DIR $BASE_DIR/bdev.conf)
echo $(jq -r '.[].name' <<< $bdevs)
else
for bdf in $(iter_pci_class_code 01 08 02); do
echo $(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
done
fi
}

@@ -102,6 +111,26 @@ function create_fio_config(){
local cores_numa=($(get_cores_numa_node "$5"))
local disks_per_core=$(($disk_no/$no_cores))
local disks_per_core_mod=$(($disk_no%$no_cores))

# For the kernel driver, each disk will be aligned with all cpus on the same NUMA node
if [ "$plugin" != "nvme" ] && [ "$plugin" != "bdev" ]; then
for (( i=0; i<$disk_no; i++ ))
do
sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio
filename="/dev/${disks[$i]}"
sed -i -e "\$afilename=$filename" $BASE_DIR/config.fio
cpu_used=""
for (( j=0; j<$no_cores; j++ ))
do
core_numa=${cores_numa[$j]}
if [ "${disks_numa[$i]}" = "$core_numa" ]; then
cpu_used+="${cores[$j]},"
fi
done
sed -i -e "\$acpus_allowed=$cpu_used" $BASE_DIR/config.fio
echo "" >> $BASE_DIR/config.fio
done
else
for (( i=0; i<$no_cores; i++ ))
do
core_numa=${cores_numa[$i]}
@@ -121,7 +150,7 @@ function create_fio_config(){
m=0 #counter of disks per cpu core numa
n=0 #counter of all disks
while [ "$m" -lt "$total_disks_per_core" ]; do
if [ "${disks_numa[$n]}" = "$core_numa" ]; then
if [ ${disks_numa[$n]} = $core_numa ]; then
m=$(($m+1))
if [ "$plugin" = "nvme" ]; then
filename='trtype=PCIe traddr='${disks[$n]//:/.}' ns=1'
@@ -145,6 +174,7 @@
done
echo "" >> $BASE_DIR/config.fio
done
fi
}

function preconditioning(){
@@ -152,9 +182,8 @@ function preconditioning(){
local filename=""
local i
sed -i -e "\$a[preconditioning]" $BASE_DIR/config.fio
for (( i=0; i < $DISKNO; i++ ))
do
dev_name='trtype=PCIe traddr='${disks[i]//:/.}' ns=1'
for bdf in $(iter_pci_class_code 01 08 02); do
dev_name='trtype=PCIe traddr='${bdf//:/.}' ns=1'
filename+=$(printf %s":" "$dev_name")
done
echo "** Preconditioning disks, this can take a while, depending on the size of disks."
@@ -192,6 +221,21 @@ function get_results(){
stdev=${stdev%.*}
echo $(( $stdev/100000 ))
;;
mean_slat_usec)
mean_slat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)")
mean_slat=${mean_slat%.*}
echo $(( $mean_slat/100000 ))
;;
mean_clat_usec)
mean_clat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)")
mean_clat=${mean_clat%.*}
echo $(( $mean_clat/100000 ))
;;
bw_Kibs)
bw=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.bw + .write.bw)")
bw=${bw%.*}
echo $(( $bw ))
;;
esac
}

@@ -209,6 +253,12 @@ function run_spdk_nvme_fio(){
sleep 1
}

function run_nvme_fio(){
echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
$FIO_BIN $BASE_DIR/config.fio --output-format=json "$@"
sleep 1
}

function usage()
{
set +x
@@ -219,14 +269,19 @@ function usage()
echo " --run-time=TIME[s] Tell fio to run the workload for the specified period of time. [default=$RUNTIME]"
echo " --ramp-time=TIME[s] Fio will run the specified workload for this amount of time before logging any performance numbers. [default=$RAMP_TIME]"
echo " --fio-bin=PATH Path to fio binary. [default=$FIO_BIN]"
echo " --fio-plugin=STR Use bdev or nvme fio_plugin. [default=$PLUGIN]"
echo " --driver=STR Use 'bdev' or 'nvme' for spdk driver with fio_plugin,"
echo " 'kernel-libaio', 'kernel-classic-polling' or 'kernel-hybrid-polling' for kernel driver. [default=$PLUGIN]"
echo " --max-disk=INT,ALL Number of disks to test on, this will run multiple workloads with an increasing number of disks each run, if =ALL then test on all found disks. [default=$DISKNO]"
echo " --disk-no=INT,ALL Number of disks to test on, this will run one workload on the selected number of disks, it discards the max-disk setting, if =ALL then test on all found disks"
echo " --rw=STR Type of I/O pattern. Accepted values are randrw,rw. [default=$RW]"
echo " --rwmixread=INT Percentage of a mixed workload that should be reads. [default=$MIX]"
echo " --iodepth=INT Number of I/Os to keep in flight against the file. [default=$IODEPTH]"
echo " --cpu-allowed=INT Comma-separated list of CPU cores used to run the workload. [default=$CPUS_ALLOWED]"
echo " --repeat-no=INT How many times to repeat each workload. [default=$REPEAT_NO]"
echo " --block-size=INT The block size in bytes used for I/O units. [default=$BLK_SIZE]"
echo " --numjobs=INT Create the specified number of clones of this job. [default=$NUMJOBS]"
echo " --no-preconditioning Skip preconditioning"
echo " --no-io-scaling Do not scale iodepth for each device in SPDK fio plugin. [default=$NOIOSCALING]"
set -x
}

@@ -240,11 +295,15 @@ while getopts 'h-:' optchar; do
fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
max-disk=*) DISKNO="${OPTARG#*=}" ;;
disk-no=*) DISKNO="${OPTARG#*=}"; ONEWORKLOAD=true ;;
fio-plugin=*) PLUGIN="${OPTARG#*=}" ;;
driver=*) PLUGIN="${OPTARG#*=}" ;;
rw=*) RW="${OPTARG#*=}" ;;
rwmixread=*) MIX="${OPTARG#*=}" ;;
iodepth=*) IODEPTH="${OPTARG#*=}" ;;
block-size=*) BLK_SIZE="${OPTARG#*=}" ;;
no-preconditioning) PRECONDITIONING=false ;;
no-io-scaling) NOIOSCALING=true ;;
cpu-allowed=*) CPUS_ALLOWED="${OPTARG#*=}" ;;
numjobs=*) NUMJOBS="${OPTARG#*=}" ;;
repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;;
*) usage $0 echo "Invalid argument '$OPTARG'"; exit 1 ;;
esac
@@ -257,7 +316,11 @@ done
trap 'rm -f *.state $BASE_DIR/bdev.conf; print_backtrace' ERR SIGTERM SIGABRT
mkdir -p $BASE_DIR/results
date="$(date +'%m_%d_%Y_%H%M%S')"
disks=($(get_disks nvme))
if [ $PLUGIN = "bdev" ]; then
$ROOT_DIR/scripts/gen_nvme.sh >> $BASE_DIR/bdev.conf
fi

disks=($(get_disks $PLUGIN))
if [[ $DISKNO == "ALL" ]] || [[ $DISKNO == "all" ]]; then
DISKNO=${#disks[@]}
elif [[ $DISKNO -gt ${#disks[@]} ]] || [[ ! $DISKNO =~ ^[0-9]+$ ]]; then

@@ -13,12 +13,17 @@
# then "--cpu-allowed=1,2,28,29" results in a NUMA-balanced configuration with 4 devices on each CPU core.
# However, if the test will use 10 CPU cores, then the best option would be "--cpu-allowed=1,2,3,4,28,29,30,31,32,33" because cores 1-4 will be aligned with
# 2 devices on numa0 per core, cores 28-29 will be aligned with 2 devices on numa1 per core and cores 30-33 with 1 device on numa1 per core.
# "--iodepth" - Number of I/Os to keep in flight per device.
# "--fio-plugin" - The SPDK fio plugin used in this test - bdev or nvme
# "--iodepth" - Number of I/Os to keep in flight per device for SPDK fio_plugin and per job for kernel driver.
# "--driver" - This parameter is used to set the ioengine and other fio parameters that determine how fio jobs issue I/O. SPDK supports two modes (nvme and bdev): to use the SPDK BDEV fio plugin set the value to bdev, set the value to nvme to use the SPDK NVMe PMD.
# There are 3 modes available for the Linux Kernel driver: set the value to kernel-libaio to use the Linux asynchronous I/O engine,
# set the value to kernel-classic-polling to use the pvsync2 ioengine in classic polling mode (100% load on the polling CPU core),
# set the value to kernel-hybrid-polling to use the pvsync2 ioengine in hybrid polling mode where the polling thread sleeps for half the mean device execution time.
# "--no-preconditioning" - skip preconditioning - Normally the script will precondition disks to put them in a steady state.
# However, preconditioning can be skipped, for example if preconditioning has already been done and the workload was 100% reads.
# "--disk-no" - use specified number of disks for test.
# "--repeat-no" - Repeat each workload the specified number of times.
# "--numjobs" - Number of fio threads running the workload.
# "--no-io-scaling" - Set iodepth to be per job instead of per device for SPDK fio_plugin.
# An Example Performance Test Run
# "./spdk/test/perf/run_perf.sh --run-time=600 --ramp-time=60 --cpu-allowed=28 --fio-bin=/usr/src/fio/fio\
# --rwmixread=100 --iodepth=256 --fio-plugin=bdev --no-preconditioning --disk-no=6"
@@ -27,9 +32,6 @@
# core no 28.
BASE_DIR=$(readlink -f $(dirname $0))
. $BASE_DIR/common.sh
if [ $PLUGIN = "bdev" ]; then
$ROOT_DIR/scripts/gen_nvme.sh >> $BASE_DIR/bdev.conf
fi

disk_names=$(get_disks $PLUGIN)
disks_numa=$(get_numa_node $PLUGIN "$disk_names")
@@ -38,18 +40,38 @@ no_cores=($cores)
no_cores=${#no_cores[@]}

if $PRECONDITIONING; then
HUGEMEM=8192 $ROOT_DIR/scripts/setup.sh
cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio
preconditioning
rm -f $BASE_DIR/config.fio
fi

#Kernel Classic Polling ioengine parameters
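# io_poll_delay=-1 selects classic polling (the polling thread busy-waits, 100% load on the
# polling CPU core); io_poll_delay=0 selects hybrid polling, where the kernel sleeps for about
# half the mean device completion time before it starts polling.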
if [ $PLUGIN = "kernel-classic-polling" ]; then
$ROOT_DIR/scripts/setup.sh reset
fio_ioengine_opt="--ioengine=pvsync2 --hipri=100"
for disk in $disk_names; do
echo -1 > /sys/block/$disk/queue/io_poll_delay
done
#Kernel Hybrid Polling ioengine parameter
elif [ $PLUGIN = "kernel-hybrid-polling" ]; then
$ROOT_DIR/scripts/setup.sh reset
fio_ioengine_opt="--ioengine=pvsync2 --hipri=100"
for disk in $disk_names; do
echo 0 > /sys/block/$disk/queue/io_poll_delay
done
elif [ $PLUGIN = "kernel-libaio" ]; then
$ROOT_DIR/scripts/setup.sh reset
fio_ioengine_opt="--ioengine=libaio"
fi

result_dir=perf_results_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${date}
mkdir $BASE_DIR/results/$result_dir
mkdir -p $BASE_DIR/results/$result_dir
result_file=$BASE_DIR/results/$result_dir/perf_results_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${date}.csv
unset iops_disks mean_lat_disks p99_lat_disks p99_99_lat_disks stdev_disks
unset iops_disks bw mean_lat_disks_usec p99_lat_disks_usec p99_99_lat_disks_usec stdev_disks_usec
echo "run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workload-mix" > $result_file
printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $no_cores $RW $MIX >> $result_file
echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec]" >> $result_file
echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
#Run each workload $REPEAT_NO times
for (( j=0; j < $REPEAT_NO; j++ ))
do
@@ -60,12 +82,26 @@ do
echo "" >> $BASE_DIR/config.fio
#The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
#Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
if [ "$PLUGIN" = "nvme" ] || [ "$PLUGIN" = "bdev" ] && [ "$NOIOSCALING" = false ]; then
qd=$(( $IODEPTH * $k ))
filename=$(create_fio_config $k $PLUGIN "$disk_names" "$disks_numa" "$cores")
desc="Running Test: Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH} fio_plugin=$PLUGIN"
else
qd=$IODEPTH
fi

create_fio_config $k $PLUGIN "$disk_names" "$disks_numa" "$cores"
desc="Running Test: Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH} io_plugin/driver=$PLUGIN"

if [ $PLUGIN = "nvme" ] || [ $PLUGIN = "bdev" ]; then
run_spdk_nvme_fio $PLUGIN "--runtime=$RUNTIME" "--ramp_time=$RAMP_TIME" "--bs=$BLK_SIZE"\
"--rw=$RW" "--rwmixread=$MIX" "--iodepth=$qd" "--output=$NVME_FIO_RESULTS" "--time_based=1"\
"--description=$desc"
"--numjobs=$NUMJOBS" "--description=$desc" "-log_avg_msec=250"\
"--write_lat_log=$BASE_DIR/results/$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${date}_${k}disks_${j}"
else
run_nvme_fio $fio_ioengine_opt "--runtime=$RUNTIME" "--ramp_time=$RAMP_TIME" "--bs=$BLK_SIZE"\
"--rw=$RW" "--rwmixread=$MIX" "--iodepth=$qd" "--output=$NVME_FIO_RESULTS" "--time_based=1"\
"--numjobs=$NUMJOBS" "--description=$desc" "-log_avg_msec=250"\
"--write_lat_log=$BASE_DIR/results/$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${date}_${k}disks_${j}"
fi

#Store values for every number of used disks
iops_disks[$k]=$((${iops_disks[$k]} + $(get_results iops $MIX)))
@@ -73,6 +109,10 @@ do
p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} + $(get_results p99_lat_usec $MIX)))
p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} + $(get_results p99_99_lat_usec $MIX)))
stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} + $(get_results stdev_usec $MIX)))

mean_slat_disks_usec[$k]=$((${mean_slat_disks_usec[$k]} + $(get_results mean_slat_usec $MIX)))
mean_clat_disks_usec[$k]=$((${mean_clat_disks_usec[$k]} + $(get_results mean_clat_usec $MIX)))
bw[$k]=$((${bw[$k]} + $(get_results bw_Kibs $MIX)))
cp $NVME_FIO_RESULTS $BASE_DIR/results/$result_dir/perf_results_${MIX}_${PLUGIN}_${no_cores}cpus_${date}_${k}_disks_${j}.json
cp $BASE_DIR/config.fio $BASE_DIR/results/$result_dir/config_${MIX}_${PLUGIN}_${no_cores}cpus_${date}_${k}_disks_${j}.fio
rm -f $BASE_DIR/config.fio
@@ -86,14 +126,17 @@ done
#Write results to csv file
for (( k=$DISKNO; k >= 1; k-=2 ))
do
iops_disks[$k]=$((${iops_disks[$k]} / 3))
mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / 3))
p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} / 3))
p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} / 3))
stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} / 3))
iops_disks[$k]=$((${iops_disks[$k]} / $REPEAT_NO))
mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / $REPEAT_NO))
p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} / $REPEAT_NO))
p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} / $REPEAT_NO))
stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} / $REPEAT_NO))
mean_slat_disks_usec[$k]=$((${mean_slat_disks_usec[$k]} / $REPEAT_NO))
mean_clat_disks_usec[$k]=$((${mean_clat_disks_usec[$k]} / $REPEAT_NO))
bw[$k]=$((${bw[$k]} / $REPEAT_NO))

printf "%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]}\
${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} >> $result_file
printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]}\
${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file

#if tested on only one number of disks
if $ONEWORKLOAD; then