test/vhost: make run_fio.py to just run fio

Make shell scripts prepare fio jobs and just call run_fio.py to run
those jobs. This way run_fio.py doesn't need to know anything about test
environment configuration.

Change-Id: I10b6954011855e9139ff7b5372070ec553009d33
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-on: https://review.gerrithub.io/391929
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Pawel Wodkowski 2017-12-15 16:11:02 +01:00 committed by Jim Harris
parent 832f4e4df6
commit 3911366d66
6 changed files with 93 additions and 110 deletions

View File

@ -773,6 +773,47 @@ function vm_check_blk_location()
fi
}
function run_fio()
{
	# Prepare a fio job file for each VM and launch run_fio.py on all of them.
	#
	# Arguments:
	#   --job-file=PATH   fio job file used as a template for every VM
	#   --fio-bin=PATH    optional fio binary to use on the remote host
	#   --vm=NUM:DISKS    VM number and ':'-separated disk list (repeatable)
	#   --out=DIR         directory for result files (created if missing)
	#
	# Returns 1 on invalid arguments.
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local fio_disks=""
	local vm

	# Quote "$@" so arguments containing whitespace survive the loop.
	for arg in "$@"; do
		case "$arg" in
			--job-file=*) job_file="${arg#*=}" ;;
			--fio-bin=*) fio_bin="--fio-bin=${arg#*=}" ;;
			--vm=*) vms+=( "${arg#*=}" ) ;;
			--out=*)
				out="$arg"
				mkdir -p "${out#*=}"
				;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	# Prepare the job file for each VM: substitute the VM's disk list into
	# the 'filename=' line and upload the result to /root/fio.job.
	for vm in "${vms[@]}"; do
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		sed "s@filename=@filename=$vmdisks@" "$job_file" | vm_ssh $vm_num 'cat > /root/fio.job'
		fio_disks+="127.0.0.1:$(vm_fio_socket $vm_num):$vmdisks,"

		vm_ssh $vm_num ls -al
		vm_ssh $vm_num cat /root/fio.job
	done

	# $fio_bin and $out are intentionally unquoted: each is either empty
	# (must expand to nothing) or a single --opt=value word.
	python $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/fio.job $fio_bin $out ${fio_disks%,}
}
# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit()

View File

@ -12,15 +12,17 @@ fio_bin = "fio"
def show_help():
print("""Usage: python run_fio.py [options] [args]
Description:
Run FIO job file 'fio.job' on remote machines.
NOTE: The job file must exist on remote machines on '/root/' directory.
Args:
[VMs] (ex. vm1_IP:vm1_port:vm1_disk1:vm_disk2,vm2_IP:vm2_port:vm2_disk1,etc...)
Options:
-h, --help Show this message.
-j, --job-files Paths to files with custom FIO jobs configuration.
-f, --fio-bin Location of FIO binary (Default "fio")
-j, --job-file Paths to file with FIO job configuration on remote host.
-f, --fio-bin Location of FIO binary on remote host (Default "fio")
-o, --out Directory used to save generated job files and
files with test results (Default: same dir where
this script is located)
files with test results
-p, --perf-vmex Enable aggregating statistic for VMEXITS for VMs
""")
@ -51,7 +53,7 @@ def run_fio(vms, fio_cfg_fname, out_path, perf_vmex=False):
# vm[0] = IP address, vm[1] = Port number
fio_cmd = " ".join([fio_cmd,
"--client={vm_ip},{vm_port}".format(vm_ip=vm[0], vm_port=vm[1]),
"--remote-config /root/{cfg}".format(cfg=fio_cfg_fname)])
"--remote-config {cfg}".format(cfg=fio_cfg_fname)])
print(fio_cmd)
if perf_vmex:
@ -107,12 +109,10 @@ def main():
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(os.path.join(dname, "../../.."))
vms = []
fio_cfgs = []
fio_cfg = None
perf_vmex = False
out_dir = os.path.join(os.getcwd(), "fio_results")
try:
opts, args = getopt.getopt(sys.argv[1:], "hj:f:o:p",
@ -122,60 +122,39 @@ def main():
show_help()
sys.exit(1)
if len(args) < 1:
show_help()
sys.exit(1)
for o, a in opts:
if o in ("-j", "--job-file"):
fio_cfgs = a.split(",")
fio_cfg = a
elif o in ("-h", "--help"):
show_help()
sys.exit(1)
elif o in ("-p", "--perf-vmex"):
perf_vmex = True
elif o in ("-o", "--out"):
out_dir = os.path.join(a, "fio_results")
out_dir = a
elif o in ("-f", "--fio-bin"):
fio_bin = a
if len(fio_cfgs) < 1:
print("ERROR! No FIO jobs provided!")
if fio_cfg is None:
print("ERROR! No FIO job provided!")
sys.exit(1)
if len(args) < 1:
show_help()
sys.exit(1)
else:
# Get IP, port and fio 'filename' information from positional args
for arg in args[0].split(","):
_ = arg.split(":")
ip, port, filenames = _[0], _[1], ":".join(_[2:])
vms.append((ip, port, filenames))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
print("ERROR! Folder {out_dir} does not exist ".format(out_dir=out_dir))
sys.exit(1)
for fio_cfg in fio_cfgs:
fio_cfg_fname = os.path.basename(fio_cfg)
print("Running job file: {0}".format(fio_cfg_fname))
# Get IP, port and fio 'filename' information from positional args
for arg in args[0].split(","):
_ = arg.split(":")
ip, port, filenames = _[0], _[1], ":".join(_[2:])
vms.append((ip, port, filenames))
for i, vm in enumerate(vms):
# VM - tuple of IP / Port / Filename for VM to run test
print("Preparing VM {0} - {1} for FIO job".format(i, vm[0]))
exec_cmd("./test/vhost/common/vm_ssh.sh {vm_num} sh -c 'rm {cfg}'"
.format(vm_num=i, cfg=fio_cfg_fname), blocking=True)
# Copy FIO config to VM
with open(fio_cfg, "r") as fio_cfg_fh:
for line in fio_cfg_fh.readlines():
if "filename" in line:
line = "filename=" + vm[2]
out = exec_cmd("./test/vhost/common/vm_ssh.sh {vm_num} sh -c 'echo {line} >> {cfg}'"
.format(vm_num=i, line=line.strip(), cfg=fio_cfg_fname), blocking=True)
if out[0] != 0:
print("ERROR! While copying FIO job config file to VM {vm_num} - {vm_ip}\n"
.format(vm_num=1, vm_ip=vm[0]))
sys.exit(1)
run_fio(vms, fio_cfg_fname, out_dir, perf_vmex)
print("Running job file: {0}".format(fio_cfg))
run_fio(vms, fio_cfg, out_dir, perf_vmex)
if __name__ == "__main__":
sys.exit(main())

View File

@ -6,14 +6,14 @@ BASE_DIR=$(readlink -f $(dirname $0))
dry_run=false
no_shutdown=false
fio_bin="fio"
fio_jobs="$COMMON_DIR/fio_jobs/"
fio_bin=""
remote_fio_bin=""
fio_jobs=""
test_type=spdk_vhost_scsi
reuse_vms=false
force_build=false
vms=()
used_vms=""
disk_split=""
x=""
function usage()
@ -32,8 +32,7 @@ function usage()
echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
echo " --qemu-src=QEMU_DIR Location of the QEMU sources"
echo " --dpdk-src=DPDK_DIR Location of the DPDK sources"
echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
echo " can point to a directory with regex mask, example: ./dir/*.job"
echo " --fio-job= Fio config to use for test."
echo " All VMs will run the same fio job when FIO executes."
echo " (no unique jobs for specific VMs)"
echo " --work-dir=WORK_DIR Where to find build file. Must exist. [default: $TEST_DIR]"
@ -46,10 +45,6 @@ function usage()
echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
echo " If test-type=spdk_vhost_blk then each disk can have additional size parameter, e.g."
echo " --vm=X,os.qcow,DISK_size_35G; unit can be M or G; default - 20G"
echo " --disk-split By default all test types execute fio jobs on all disks which are available on guest"
echo " system. Use this option if only some of the disks should be used for testing."
echo " Example: --disk-split=4,1-3 will result in VM 1 using it's first disk (ex. /dev/sda)"
echo " and VM 2 using it's disks 1-3 (ex. /dev/sdb, /dev/sdc, /dev/sdd)"
exit 0
}
@ -64,13 +59,12 @@ while getopts 'xh-:' optchar; do
fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
qemu-src=*) QEMU_SRC_DIR="${OPTARG#*=}" ;;
dpdk-src=*) DPDK_SRC_DIR="${OPTARG#*=}" ;;
fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
fio-job=*) fio_job="${OPTARG#*=}" ;;
dry-run) dry_run=true ;;
no-shutdown) no_shutdown=true ;;
test-type=*) test_type="${OPTARG#*=}" ;;
force-build) force_build=true ;;
vm=*) vms+=("${OPTARG#*=}") ;;
disk-split=*) disk_split="${OPTARG#*=}" ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
@ -82,8 +76,9 @@ while getopts 'xh-:' optchar; do
done
shift $(( OPTIND - 1 ))
if [[ -d "$fio_jobs" ]]; then
fio_jobs="$fio_jobs/*.job"
if [[ ! -r "$fio_job" ]]; then
echo "ERROR: no fio job file specified"
exit 1
fi
. $COMMON_DIR/common.sh
@ -274,23 +269,11 @@ echo ""
echo "INFO: Testing..."
echo "INFO: Running fio jobs ..."
run_fio="python $COMMON_DIR/run_fio.py "
run_fio+="$fio_bin "
run_fio+="--job-file="
for job in $fio_jobs; do
run_fio+="$job,"
done
run_fio="${run_fio::-1}"
run_fio+=" "
run_fio+="--out=$TEST_DIR "
if [[ ! $disk_split == '' ]]; then
run_fio+="--split-disks=$disk_split "
fi
# Check if all VMs have their disks in the same location
DISK=""
fio_disks=""
for vm_num in $used_vms; do
vm_dir=$VM_BASE_DIR/$vm_num
@ -308,20 +291,9 @@ for vm_num in $used_vms; do
vm_check_blk_location $vm_num
fi
run_fio+="127.0.0.1:$(cat $vm_dir/fio_socket):"
for disk in $SCSI_DISK; do
run_fio+="/dev/$disk:"
done
run_fio="${run_fio::-1}"
run_fio+=","
fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done
run_fio="${run_fio%,}"
run_fio+=" "
run_fio="${run_fio::-1}"
echo -e "$run_fio"
if $dry_run; then
read -p "Enter to kill evething" xx
sleep 3
@ -329,7 +301,7 @@ if $dry_run; then
exit 0
fi
$run_fio
run_fio $fio_bin --job-file="$fio_job" --out="$TEST_DIR/fio_results" $fio_disks
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
for vm_num in $used_vms; do

View File

@ -13,7 +13,7 @@ else
fio_rw=("randwrite")
fi
function run_fio() {
function run_spdk_fio() {
LD_PRELOAD=$plugindir/fio_plugin /usr/src/fio/fio --ioengine=spdk_bdev --iodepth=128 --bs=4k --runtime=10 $testdir/bdev.fio "$@" --spdk_mem=1024
fio_status=$?
if [ $fio_status != 0 ]; then
@ -114,7 +114,7 @@ for bdev in $bdevs; do
echo -n "$b:" >> $testdir/bdev.fio
done
run_fio --spdk_conf=$testdir/bdev.conf
run_spdk_fio --spdk_conf=$testdir/bdev.conf
timing_exit fio_rw_verify
done
@ -123,7 +123,7 @@ for bdev in $bdevs; do
timing_enter unmap
cp $testdir/../common/fio_jobs/default_initiator.job $testdir/bdev.fio
prepare_fio_job_for_unmap "$bdevs"
run_fio --spdk_conf=$testdir/bdev.conf
run_spdk_fio --spdk_conf=$testdir/bdev.conf
timing_exit unmap
#Host test for +4G
@ -133,7 +133,7 @@ for bdev in $bdevs; do
echo "INFO: Running 4G test $rw for disk $bdev"
cp $testdir/../common/fio_jobs/default_initiator.job $testdir/bdev.fio
prepare_fio_job_4G "$rw" "$bdevs"
run_fio --spdk_conf=$testdir/bdev.conf
run_spdk_fio --spdk_conf=$testdir/bdev.conf
timing_exit fio_4G_rw_verify
done
fi

View File

@ -47,7 +47,7 @@ while getopts 'xh-:' optchar; do
-)
case "$OPTARG" in
help) usage $0 ;;
fio-bin=*) fio_bin="${OPTARG#*=}" ;;
fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
vm-count=*) vm_count="${OPTARG#*=}" ;;
max-disks=*) max_disks="${OPTARG#*=}" ;;
ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
@ -202,17 +202,15 @@ $COMMON_DIR/vm_run.sh $x --work-dir=$TEST_DIR $used_vms
vm_wait_for_boot 600 $used_vms
# Get disk names from VMs and run FIO traffic
run_fio="python $COMMON_DIR/run_fio.py --fio-bin=$fio_bin"
run_fio+=" --job-file=$COMMON_DIR/fio_jobs/default_integrity.job"
run_fio+=" --out=$TEST_DIR "
fio_disks=""
for vm_num in $used_vms; do
vm_dir=$VM_BASE_DIR/$vm_num
qemu_mask_param="VM_${vm_num}_qemu_mask"
host_name="VM-$vm_num-${!qemu_mask_param}"
vm_ssh $vm_num "hostname $host_name"
vm_start_fio_server --fio-bin=$fio_bin $vm_num
vm_start_fio_server $fio_bin $vm_num
if [[ "$ctrl_type" == "vhost_scsi" ]]; then
vm_check_scsi_location $vm_num
@ -220,18 +218,11 @@ for vm_num in $used_vms; do
vm_check_blk_location $vm_num
fi
run_fio+="127.0.0.1:$(cat $vm_dir/fio_socket):"
for disk in $SCSI_DISK; do
run_fio+="/dev/$disk:"
done
run_fio="${run_fio::-1}"
run_fio+=","
fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done
run_fio="${run_fio::-1}"
# Run FIO traffic
echo -e "$run_fio"
$run_fio
run_fio $fio_bin --job-file=$COMMON_DIR/fio_jobs/default_integrity.job --out="$TEST_DIR/fio_results" $fio_disks
echo "INFO: Shutting down virtual machines..."
vm_shutdown_all

View File

@ -60,7 +60,7 @@ case $1 in
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,$VM_IMAGE,Nvme0n1p0 \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/common/fio_jobs/default_performance.job \
--fio-job=$WORKDIR/common/fio_jobs/default_performance.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
-pb|--performance-blk)
@ -68,7 +68,7 @@ case $1 in
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,$VM_IMAGE,Nvme0n1p0 \
--test-type=spdk_vhost_blk \
--fio-jobs=$WORKDIR/common/fio_jobs/default_performance.job \
--fio-job=$WORKDIR/common/fio_jobs/default_performance.job \
--qemu-src=/home/sys_sgsw/vhost/qemu
;;
-i|--integrity)
@ -76,7 +76,7 @@ case $1 in
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/common/fio_jobs/default_integrity.job \
--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job \
--qemu-src=/home/sys_sgsw/vhost/qemu -x
;;
-ib|--integrity-blk)
@ -84,7 +84,7 @@ case $1 in
./fiotest/autotest.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_blk \
--fio-jobs=$WORKDIR/common/fio_jobs/default_integrity.job \
--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job \
--qemu-src=/home/sys_sgsw/vhost/qemu -x
;;
-fs|--fs-integrity-scsi)
@ -97,12 +97,12 @@ case $1 in
;;
-ils|--integrity-lvol-scsi)
echo 'Running lvol integrity suite...'
./lvol/lvol_test.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
./lvol/lvol_test.sh -x --fio-bin=/home/sys_sgsw/fio_ubuntu \
--ctrl-type=vhost_scsi
;;
-ilb|--integrity-lvol-blk)
echo 'Running lvol integrity suite...'
./lvol/lvol_test.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
./lvol/lvol_test.sh -x --fio-bin=/home/sys_sgsw/fio_ubuntu \
--ctrl-type=vhost_blk
;;
-hp|--hotplug)
@ -113,7 +113,7 @@ case $1 in
--vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
--vm=3,$VM_IMAGE,Nvme0n1p6:Nvme0n1p7 \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
--fio-job=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
;;
-ro|--readonly)
echo 'Running readonly tests suite...'