test: remove duplicate fio.py script file

scripts/fio.py and test/nvmf/fio/nvmf_fio.py are almost exactly
the same script. This commit unifies the two files into one.

Change-Id: If753baaeb9f92dad2cda27bb4bed78ade4827d0b
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448656
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Vitaliy Mysak <vitaliy.mysak@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Karol Latecki authored 2019-03-21 17:57:37 +01:00, committed by Jim Harris
parent 02b0230296
commit b6abc16b05
17 changed files with 50 additions and 174 deletions
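With the scripts unified, fio.py takes the target type (nvmf or iscsi) as a required first argument, followed by the existing io_size, queue_depth, test_type and runtime arguments and the optional verify flag. A minimal sketch of the new invocation, using parameter values that appear in the updated test scripts below:

# iSCSI-attached devices: 512-byte blocks, queue depth 1, sequential write for 2 seconds
scripts/fio.py iscsi 512 1 write 2

# NVMe-oF-attached devices: 4096-byte blocks, queue depth 1, write for 1 second with data verification
scripts/fio.py nvmf 4096 1 write 1 verify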


@@ -44,25 +44,27 @@ def interrupt_handler(signum, frame):
 def main():
     global fio
-    if (len(sys.argv) < 5):
+    if (len(sys.argv) < 6):
         print("usage:")
-        print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>")
+        print(" " + sys.argv[0] + " <nvmf/iscsi> <io_size> <queue_depth> <test_type> <runtime>")
         print("advanced usage:")
         print("If you want to run fio with verify, please add verify string after runtime.")
         print("Currently fio.py only support write rw randwrite randrw with verify enabled.")
         sys.exit(1)
-    io_size = int(sys.argv[1])
-    queue_depth = int(sys.argv[2])
-    test_type = sys.argv[3]
-    runtime = sys.argv[4]
-    if len(sys.argv) > 5:
-        verify = True
-    else:
-        verify = False
+    app = str(sys.argv[1])
+    io_size = int(sys.argv[2])
+    queue_depth = int(sys.argv[3])
+    test_type = sys.argv[4]
+    runtime = sys.argv[5]
 
-    devices = get_target_devices()
-    print(("Found devices: ", devices))
+    verify = False
+    if len(sys.argv) > 6:
+        verify = True
+
+    if app == "nvmf":
+        devices = get_nvmf_target_devices()
+    elif app == "iscsi":
+        devices = get_iscsi_target_devices()
 
     configure_devices(devices)
 
     try:
@@ -73,6 +75,8 @@ def main():
         sys.exit(1)
 
     device_paths = ['/dev/' + dev for dev in devices]
+    print("Device paths:")
+    print(device_paths)
     sys.stdout.flush()
     signal.signal(signal.SIGTERM, interrupt_handler)
     signal.signal(signal.SIGINT, interrupt_handler)
@@ -85,11 +89,16 @@ def main():
     sys.exit(rc)
 
 
-def get_target_devices():
+def get_iscsi_target_devices():
     output = check_output('iscsiadm -m session -P 3', shell=True)
     return re.findall("Attached scsi disk (sd[a-z]+)", output.decode("ascii"))
 
 
+def get_nvmf_target_devices():
+    output = str(check_output('lsblk -l -o NAME', shell=True).decode())
+    return re.findall("(nvme[0-9]+n[0-9]+)\n", output)
+
+
 def create_fio_config(size, q_depth, devices, test, run_time, verify):
     norandommap = 0
     if not verify:
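For reference, the two device-discovery helpers above shell out to lsblk and iscsiadm respectively; a rough command-line equivalent of what each one matches (the grep patterns only illustrate the regexes above, they are not part of the change):

# get_nvmf_target_devices(): nvmeXnY block devices reported by lsblk
lsblk -l -o NAME | grep -E '^nvme[0-9]+n[0-9]+$'

# get_iscsi_target_devices(): "Attached scsi disk sdX" lines from the active iSCSI sessions
iscsiadm -m session -P 3 | grep "Attached scsi disk"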


@@ -11,8 +11,8 @@ function node_login_fio_logout() {
     done
     iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
     sleep 1
-    $fio_py 512 1 write 2
-    $fio_py 512 1 read 2
+    $fio_py iscsi 512 1 write 2
+    $fio_py iscsi 512 1 read 2
     iscsiadm -m node --logout -p $TARGET_IP:$ISCSI_PORT
     sleep 1
 }


@@ -36,7 +36,7 @@ function running_config() {
     timing_exit start_iscsi_tgt2
 
     sleep 1
-    $fio_py 4096 1 randrw 5
+    $fio_py iscsi 4096 1 randrw 5
 }
 
 if [ -z "$TARGET_IP" ]; then
@@ -92,12 +92,12 @@ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
 trap "iscsicleanup; killprocess $pid; delete_tmp_files; exit 1" SIGINT SIGTERM EXIT
 
 sleep 1
-$fio_py 4096 1 randrw 1 verify
-$fio_py 131072 32 randrw 1 verify
-$fio_py 524288 128 randrw 1 verify
+$fio_py iscsi 4096 1 randrw 1 verify
+$fio_py iscsi 131072 32 randrw 1 verify
+$fio_py iscsi 524288 128 randrw 1 verify
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
-    $fio_py 4096 1 write 300 verify
+    $fio_py iscsi 4096 1 write 300 verify
 
     # Run the running_config test which will generate a config file from the
     # running iSCSI target, then kill and restart the iSCSI target using the
@@ -107,7 +107,7 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
 fi
 
 # Start hotplug test case.
-$fio_py 1048576 128 rw 10 &
+$fio_py iscsi 1048576 128 rw 10 &
 fio_pid=$!
 
 sleep 3


@@ -71,7 +71,7 @@ iscsiadm -m node --login -p $MIGRATION_ADDRESS:$ISCSI_PORT
 
 # fio tests for multi-process
 sleep 1
-$fio_py 4096 32 randrw 10 &
+$fio_py iscsi 4096 32 randrw 10 &
 fiopid=$!
 
 sleep 5


@@ -70,7 +70,7 @@ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
 timing_exit discovery
 
 timing_enter fio
-$fio_py 131072 8 randwrite 10 verify
+$fio_py iscsi 131072 8 randwrite 10 verify
 timing_exit fio
 
 rm -f ./local-job0-0-verify.state


@@ -70,8 +70,8 @@ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
 sleep 1
 
 echo "Running FIO"
-$fio_py 131072 64 randrw 5
-$fio_py 262144 16 randwrite 10
+$fio_py iscsi 131072 64 randrw 5
+$fio_py iscsi 262144 16 randwrite 10
 sync
 
 trap - SIGINT SIGTERM EXIT


@@ -88,7 +88,7 @@ trap "iscsicleanup; killprocess $iscsipid; killprocess $nvmfpid; \
 sleep 1
 
 echo "Running FIO"
-$fio_py 4096 1 randrw 1 verify
+$fio_py iscsi 4096 1 randrw 1 verify
 
 rm -f ./local-job0-0-verify.state
 iscsicleanup
@@ -97,7 +97,7 @@ killprocess $iscsipid
 run_nvme_remote "remote"
 
 echo "Running FIO"
-$fio_py 4096 1 randrw 1 verify
+$fio_py iscsi 4096 1 randrw 1 verify
 
 rm -f ./local-job0-0-verify.state
 trap - SIGINT SIGTERM EXIT


@@ -54,7 +54,7 @@ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
 timing_exit discovery
 
 timing_enter fio_test
-$fio_py $BLOCKSIZE 64 randwrite $RUNTIME verify
+$fio_py iscsi $BLOCKSIZE 64 randwrite $RUNTIME verify
 timing_exit fio_test
 
 iscsicleanup


@@ -21,7 +21,7 @@ function check_qos_works_well() {
         start_io_count=$($rpc_py get_bdevs_iostat -b $3 | jq -r '.[1].bytes_read')
     fi
 
-    $fio_py 1024 128 randread 5
+    $fio_py iscsi 1024 128 randread 5
 
     if [ $LIMIT_TYPE = IOPS ]; then
         end_io_count=$($rpc_py get_bdevs_iostat -b $3 | jq -r '.[1].num_read_ops')


@@ -51,8 +51,8 @@ iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
 trap "iscsicleanup; killprocess $pid; rbd_cleanup; exit 1" SIGINT SIGTERM EXIT
 
 sleep 1
-$fio_py 4096 1 randrw 1 verify
-$fio_py 131072 32 randrw 1 verify
+$fio_py iscsi 4096 1 randrw 1 verify
+$fio_py iscsi 131072 32 randrw 1 verify
 
 rm -f ./local-job0-0-verify.state


@@ -50,7 +50,7 @@ sleep 1
 dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
 
 sleep 1
-$fio_py 512 1 read 60 &
+$fio_py iscsi 512 1 read 60 &
 fiopid=$!
 echo "FIO pid: $fiopid"


@@ -71,7 +71,7 @@ echo "Trace record pid: $record_pid"
 trap "iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; exit 1" SIGINT SIGTERM EXIT
 
 echo "Running FIO"
-$fio_py 131072 32 randrw 1
+$fio_py iscsi 131072 32 randrw 1
 
 iscsicleanup
 # Delete Malloc blockdevs and targets


@@ -60,15 +60,15 @@ waitforblk "nvme0n1"
 waitforblk "nvme0n2"
 waitforblk "nvme0n3"
 
-$testdir/nvmf_fio.py 4096 1 write 1 verify
-$testdir/nvmf_fio.py 4096 1 randwrite 1 verify
-$testdir/nvmf_fio.py 4096 128 write 1 verify
-$testdir/nvmf_fio.py 4096 128 randwrite 1 verify
+$rootdir/scripts/fio.py nvmf 4096 1 write 1 verify
+$rootdir/scripts/fio.py nvmf 4096 1 randwrite 1 verify
+$rootdir/scripts/fio.py nvmf 4096 128 write 1 verify
+$rootdir/scripts/fio.py nvmf 4096 128 randwrite 1 verify
 
 sync
 
 #start hotplug test case
-$testdir/nvmf_fio.py 4096 1 read 10 &
+$rootdir/scripts/fio.py nvmf 4096 1 read 10 &
 fio_pid=$!
 
 sleep 3


@@ -1,133 +0,0 @@
-#!/usr/bin/env python3
-
-from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
-import re
-import sys
-import signal
-
-fio_template = """
-[global]
-thread=1
-invalidate=1
-rw=%(testtype)s
-time_based=1
-runtime=%(runtime)s
-ioengine=libaio
-direct=1
-bs=%(blocksize)d
-iodepth=%(iodepth)d
-%(verify)s
-verify_dump=1
-"""
-
-verify_template = """
-do_verify=1
-verify=meta
-verify_pattern="meta"
-"""
-
-fio_job_template = """
-[job%(jobnumber)d]
-filename=%(device)s
-"""
-
-
-def interrupt_handler(signum, frame):
-    fio.terminate()
-    print("FIO terminated")
-    sys.exit(0)
-
-
-def main():
-    global fio
-    if (len(sys.argv) < 5):
-        print("usage:")
-        print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>")
-        print("advanced usage:")
-        print("If you want to run fio with verify, please add verify string after runtime.")
-        print("Currently fio.py only support write rw randwrite randrw with verify enabled.")
-        sys.exit(1)
-    io_size = int(sys.argv[1])
-    queue_depth = int(sys.argv[2])
-    test_type = sys.argv[3]
-    runtime = sys.argv[4]
-    if len(sys.argv) > 5:
-        verify = True
-    else:
-        verify = False
-
-    devices = get_target_devices()
-    print("Found devices: ", devices)
-
-    # configure_devices(devices)
-    try:
-        fio_executable = check_output("which fio", shell=True).split()[0]
-    except CalledProcessError as e:
-        sys.stderr.write(str(e))
-        sys.stderr.write("\nCan't find the fio binary, please install it.\n")
-        sys.exit(1)
-
-    device_paths = ['/dev/' + dev for dev in devices]
-    print(device_paths)
-    sys.stdout.flush()
-    signal.signal(signal.SIGTERM, interrupt_handler)
-    signal.signal(signal.SIGINT, interrupt_handler)
-    fio = Popen([fio_executable, '-'], stdin=PIPE)
-    fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify))
-    fio.stdin.close()
-    rc = fio.wait()
-    print("FIO completed with code %d\n" % rc)
-    sys.stdout.flush()
-    sys.exit(rc)
-
-
-def get_target_devices():
-    output = str(check_output('lsblk -l -o NAME', shell=True).decode())
-    return re.findall("(nvme[0-9]+n[0-9]+)\n", output)
-
-
-def create_fio_config(size, q_depth, devices, test, run_time, verify):
-    if not verify:
-        verifyfio = ""
-    else:
-        verifyfio = verify_template
-    fiofile = fio_template % {"blocksize": size, "iodepth": q_depth,
-                              "testtype": test, "runtime": run_time, "verify": verifyfio}
-    for (i, dev) in enumerate(devices):
-        fiofile += fio_job_template % {"jobnumber": i, "device": dev}
-    return fiofile.encode()
-
-
-def set_device_parameter(devices, filename_template, value):
-    for dev in devices:
-        filename = filename_template % dev
-        f = open(filename, 'r+b')
-        f.write(value)
-        f.close()
-
-
-def configure_devices(devices):
-    set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
-    set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
-    requested_qd = 128
-    qd = requested_qd
-    while qd > 0:
-        try:
-            set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
-            break
-        except IOError:
-            qd = qd - 1
-    if qd == 0:
-        print("Could not set block device queue depths.")
-    else:
-        print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
-    set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop")
-
-
-if __name__ == "__main__":
-    main()
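For context, the templates in the removed script expand to a plain fio job file that is written to fio over stdin. A rough sketch of that expansion for io_size=4096, queue_depth=1, test_type=write, runtime=1 with verify enabled and a single device (the device name /dev/nvme0n1 is only an example), fed to fio the same way the script does:

fio - <<'EOF'
[global]
thread=1
invalidate=1
rw=write
time_based=1
runtime=1
ioengine=libaio
direct=1
bs=4096
iodepth=1

do_verify=1
verify=meta
verify_pattern="meta"

verify_dump=1

[job0]
filename=/dev/nvme0n1
EOF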


@@ -99,7 +99,7 @@ for i in `seq 1 $SUBSYS_NR`; do
     done
 done
 
-$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10 verify
+$rootdir/scripts/fio.py nvmf 262144 64 randwrite 10 verify
 
 sync
 disconnect_nvmf


@@ -63,8 +63,8 @@ for i in `seq 1 $NVMF_SUBSYS`; do
     waitforblk "nvme${k}n1"
 done
 
-$testdir/../fio/nvmf_fio.py 262144 64 read 10
-$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10
+$rootdir/scripts/fio.py nvmf 262144 64 read 10
+$rootdir/scripts/fio.py nvmf 262144 64 randwrite 10
 sync
 
 for i in `seq 1 $NVMF_SUBSYS`; do


@@ -73,7 +73,7 @@ if [ ! -z $NVMF_SECOND_TARGET_IP ]; then
     waitforblk "nvme0n1"
 
-    $testdir/../fio/nvmf_fio.py 4096 1 write 1 verify
+    $rootdir/scripts/fio.py nvmf 4096 1 write 1 verify
 fi
 
 nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true