scripts: use python3 in all scripts

This is to prepare for the RPM package. It also lowers the number of
dependencies needed by SPDK tools.

Update changelog to deprecate Python 2 and explicit interpreter
invocation in scripts.

Change-Id: I2497cca721cbcbadc1c99c675f8b8b7f682d5efa
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-on: https://review.gerrithub.io/425233
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Pawel Wodkowski 2018-09-11 15:26:14 +02:00 committed by Jim Harris
parent bd0abde125
commit b96f97cf29
67 changed files with 178 additions and 163 deletions

View File

@ -58,6 +58,15 @@ surprises for users who may have DPDK_DIR defined for other reasons.
Users should just use the "configure" script to specify the DPDK Users should just use the "configure" script to specify the DPDK
location before building SPDK. location before building SPDK.
Although we know that many developers still use Python 2, we are officially
switching to Python 3, with the requirement that all new code must also be
valid for Python 2 until its EOL in 2020.
Invoking the interpreter explicitly is forbidden for executable scripts. There
is no need to use syntax like "python ./scripts/rpc.py". All executable
scripts must contain a proper shebang pointing to the right interpreter.
Scripts without a shebang mustn't be executable.
## v18.07: ## v18.07:
### bdev ### bdev

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
import re import re
@ -62,7 +62,7 @@ def main():
verify = False verify = False
devices = get_target_devices() devices = get_target_devices()
print("Found devices: ", devices) print(("Found devices: ", devices))
configure_devices(devices) configure_devices(devices)
try: try:

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import os import os
import re import re

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
# This script runs fio benchmark test on the local nvme device using the SPDK NVMe driver. # This script runs fio benchmark test on the local nvme device using the SPDK NVMe driver.
# Prework: Run script/setup.sh to bind SSDs to SPDK driver. # Prework: Run script/setup.sh to bind SSDs to SPDK driver.
@ -38,7 +38,7 @@ iter_num = ['1']
def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec): def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
print "Running Test: IO Size=", io_size_bytes, " QD=", qd, " Mix=", rw_mix, "CPU Mask=", cpu_mask print("Running Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num) string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
# Call fio # Call fio
@ -49,7 +49,7 @@ def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec
+ " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json" + " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json"
output = subprocess.check_output(command, shell=True) output = subprocess.check_output(command, shell=True)
print "Finished Test: IO Size=", io_size_bytes, " QD=", qd, " Mix=", rw_mix, " CPU Mask=", cpu_mask print("Finished Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
return return
@ -85,21 +85,21 @@ def parse_results(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_ti
write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean']) write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean'])
write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min']) write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min'])
write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max']) write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max'])
print "%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix", \ print("%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix",
"%-10s" % "Workload Type", "%-10s" % "CPU Mask", \ "%-10s" % "Workload Type", "%-10s" % "CPU Mask",
"%-10s" % "Run Time", "%-10s" % "Run Num", \ "%-10s" % "Run Time", "%-10s" % "Run Num",
"%-15s" % "Read IOps", \ "%-15s" % "Read IOps",
"%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")", \ "%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")",
"%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")", \ "%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")",
"%-15s" % "Write IOps", \ "%-15s" % "Write IOps",
"%-10s" % "Write MBps", "%-15s" % "Write Avg. Lat(" + lat_units + ")", \ "%-10s" % "Write MBps", "%-15s" % "Write Avg. Lat(" + lat_units + ")",
"%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")" "%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")")
print "%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix, \ print("%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix,
"%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec, \ "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec,
"%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw, \ "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw,
"%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat, \ "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat,
"%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat, \ "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat,
"%-15s" % write_min_lat, "%-15s" % write_max_lat "%-15s" % write_min_lat, "%-15s" % write_max_lat)
results = results + "," + str(read_iops) + "," + str(read_bw) + "," \ results = results + "," + str(read_iops) + "," + str(read_bw) + "," \
+ str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \ + str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \
+ "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \ + "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \
@ -128,12 +128,12 @@ def add_filename_to_conf(conf_file_name, bdf):
if len(sys.argv) != 4: if len(sys.argv) != 4:
print "usage: python ", sys.argv[0], " path_to_fio_conf path_to_ioengine num_ssds" print("usage: " % sys.argv[0] % " path_to_fio_conf path_to_ioengine num_ssds")
sys.exit() sys.exit()
num_ssds = int(sys.argv[3]) num_ssds = int(sys.argv[3])
if num_ssds > get_nvme_devices_count(): if num_ssds > get_nvme_devices_count():
print "System does not have ", num_ssds, " NVMe SSDs." print("System does not have {} NVMe SSDs.".format(num_ssds))
sys.exit() sys.exit()
host_name = os.uname()[1] host_name = os.uname()[1]

View File

@ -7,13 +7,13 @@ rootdir=$(readlink -f $testdir/../../..)
$rootdir/scripts/setup.sh $rootdir/scripts/setup.sh
# Run Performance Test with 1 SSD # Run Performance Test with 1 SSD
python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 1 $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 1
# 2 SSDs test run # 2 SSDs test run
python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 2 $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 2
# 4 SSDs test run # 4 SSDs test run
python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 4 $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 4
# 8 SSDs test run # 8 SSDs test run
python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 8 $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 8

View File

@ -7,7 +7,7 @@ from subprocess import check_call, call, check_output, Popen, PIPE
def range_incl(a, b): def range_incl(a, b):
return range(a, b + 1) return list(range(a, b + 1))
def list_spdk_used_cpus(cpus): def list_spdk_used_cpus(cpus):

View File

@ -37,8 +37,11 @@ if [ -s /etc/redhat-release ]; then
yum install -y doxygen mscgen graphviz yum install -y doxygen mscgen graphviz
# Additional dependencies for building pmem based backends # Additional dependencies for building pmem based backends
yum install -y libpmemblk-devel || true yum install -y libpmemblk-devel || true
# Additional dependencies for SPDK CLI
yum install -y python-configshell python-pexpect python3-configshell python3-pexpect # Additional dependencies for SPDK CLI - not available in rhel and centos
if ! echo "$ID $VERSION_ID" | egrep -q 'rhel 7|centos 7'; then
yum install -y python3-configshell python3-pexpect
fi
elif [ -f /etc/debian_version ]; then elif [ -f /etc/debian_version ]; then
# Includes Ubuntu, Debian # Includes Ubuntu, Debian
apt-get install -y gcc g++ make libcunit1-dev libaio-dev libssl-dev \ apt-get install -y gcc g++ make libcunit1-dev libaio-dev libssl-dev \

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
from rpc.client import print_dict, JSONRPCException from rpc.client import print_dict, JSONRPCException

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import sys import sys
import argparse import argparse
from os import getuid from os import getuid

View File

@ -681,7 +681,7 @@ class UIBdevObj(UINode):
if self.bdev.aliases: if self.bdev.aliases:
alias = self.bdev.aliases[0] alias = self.bdev.aliases[0]
info = ", ".join(filter(None, [alias, size, in_use])) info = ", ".join([_f for _f in [alias, size, in_use] if _f])
return info, True return info, True
@ -697,9 +697,9 @@ class UIVirtioScsiBdevObj(UIBdevObj):
UIBdevObj(bdev, self) UIBdevObj(bdev, self)
def summary(self): def summary(self):
if "socket" in self.bdev.virtio.keys(): if "socket" in list(self.bdev.virtio.keys()):
info = self.bdev.virtio["socket"] info = self.bdev.virtio["socket"]
if "pci_address" in self.bdev.virtio.keys(): if "pci_address" in list(self.bdev.virtio.keys()):
info = self.bdev.virtio["pci_address"] info = self.bdev.virtio["pci_address"]
return info, True return info, True
@ -898,7 +898,7 @@ class UIVhostBlkCtrlObj(UIVhostCtrl):
ro = None ro = None
if self.ctrlr.backend_specific["block"]["readonly"]: if self.ctrlr.backend_specific["block"]["readonly"]:
ro = "Readonly" ro = "Readonly"
info = ", ".join(filter(None, [self.ctrlr.socket, ro])) info = ", ".join([_f for _f in [self.ctrlr.socket, ro] if _f])
return info, True return info, True

View File

@ -72,8 +72,7 @@ class UIRoot(UINode):
# For example logical volumes: listing in menu is "Logical_Volume" # For example logical volumes: listing in menu is "Logical_Volume"
# (cannot have space), but the product name in SPDK is "Logical Volume" # (cannot have space), but the product name in SPDK is "Logical Volume"
bdev_type = bdev_type.replace("_", " ") bdev_type = bdev_type.replace("_", " ")
for bdev in filter(lambda x: bdev_type in x["product_name"].lower(), for bdev in [x for x in self.current_bdevs if bdev_type in x["product_name"].lower()]:
self.current_bdevs):
test = Bdev(bdev) test = Bdev(bdev)
yield test yield test
@ -223,8 +222,7 @@ class UIRoot(UINode):
def get_vhost_ctrlrs(self, ctrlr_type): def get_vhost_ctrlrs(self, ctrlr_type):
if self.is_init: if self.is_init:
self.list_vhost_ctrls() self.list_vhost_ctrls()
for ctrlr in filter(lambda x: ctrlr_type in x["backend_specific"].keys(), for ctrlr in [x for x in self.current_vhost_ctrls if ctrlr_type in list(x["backend_specific"].keys())]:
self.current_vhost_ctrls):
yield VhostCtrlr(ctrlr) yield VhostCtrlr(ctrlr)
@verbose @verbose
@ -259,7 +257,7 @@ class Bdev(object):
# TODO: Document in docstring parameters which describe bdevs. # TODO: Document in docstring parameters which describe bdevs.
# TODO: Possible improvement: JSON schema might be used here in future # TODO: Possible improvement: JSON schema might be used here in future
""" """
for i in bdev_info.keys(): for i in list(bdev_info.keys()):
setattr(self, i, bdev_info[i]) setattr(self, i, bdev_info[i])
@ -271,7 +269,7 @@ class LvolStore(object):
# TODO: Document in docstring parameters which describe bdevs. # TODO: Document in docstring parameters which describe bdevs.
# TODO: Possible improvement: JSON schema might be used here in future # TODO: Possible improvement: JSON schema might be used here in future
""" """
for i in lvs_info.keys(): for i in list(lvs_info.keys()):
setattr(self, i, lvs_info[i]) setattr(self, i, lvs_info[i])
@ -283,5 +281,5 @@ class VhostCtrlr(object):
# TODO: Document in docstring parameters which describe bdevs. # TODO: Document in docstring parameters which describe bdevs.
# TODO: Possible improvement: JSON schema might be used here in future # TODO: Possible improvement: JSON schema might be used here in future
""" """
for i in ctrlr_info.keys(): for i in list(ctrlr_info.keys()):
setattr(self, i, ctrlr_info[i]) setattr(self, i, ctrlr_info[i])

View File

@ -5,7 +5,7 @@ set -e
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..) rootdir=$(readlink -f $testdir/../..)
plugindir=$rootdir/examples/bdev/fio_plugin plugindir=$rootdir/examples/bdev/fio_plugin
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
function run_fio() function run_fio()
{ {

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
from collections import namedtuple from collections import namedtuple
from itertools import islice from itertools import islice
import operator import operator
@ -31,31 +31,31 @@ with open(sys.argv[1] + "/" + sys.argv[2] + ".perf.txt") as f:
for thread in sorted(threads): for thread in sorted(threads):
thread_pct = 0 thread_pct = 0
print print("")
print("Thread: {:s}".format(thread)) print("Thread: {:s}".format(thread))
print(" Percent Module") print(" Percent Module")
print("============================") print("============================")
for key, value in sorted(thread_module_samples.items(), key=operator.itemgetter(1), reverse=True): for key, value in sorted(list(thread_module_samples.items()), key=operator.itemgetter(1), reverse=True):
if key.thread == thread: if key.thread == thread:
print("{:8.4f} {:20s}".format(float(value) * 100 / total_samples, key.module)) print("{:8.4f} {:20s}".format(float(value) * 100 / total_samples, key.module))
thread_pct += float(value) * 100 / total_samples thread_pct += float(value) * 100 / total_samples
print("============================") print("============================")
print("{:8.4f} Total".format(thread_pct)) print("{:8.4f} Total".format(thread_pct))
print print("")
print(" Percent Module Function") print(" Percent Module Function")
print("=================================================================") print("=================================================================")
for key, value in islice(sorted(function_module_samples.items(), key=operator.itemgetter(1), reverse=True), 100): for key, value in islice(sorted(list(function_module_samples.items()), key=operator.itemgetter(1), reverse=True), 100):
print("{:8.4f} {:20s} {:s}".format(float(value) * 100 / total_samples, key.module, key.function)) print(("{:8.4f} {:20s} {:s}".format(float(value) * 100 / total_samples, key.module, key.function)))
print print("")
print print("")
print(" Percent Module") print(" Percent Module")
print("=================================") print("=================================")
for key, value in sorted(module_samples.items(), key=operator.itemgetter(1), reverse=True): for key, value in sorted(list(module_samples.items()), key=operator.itemgetter(1), reverse=True):
print("{:8.4f} {:s}".format(float(value) * 100 / total_samples, key)) print("{:8.4f} {:s}".format(float(value) * 100 / total_samples, key))
print print("")
with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f: with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f:
for line in f: for line in f:
if "maxresident" in line: if "maxresident" in line:
@ -67,4 +67,4 @@ with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f:
print("User: {:8.2f} ({:5.2f}%)".format(user, user * 100 / (user + system))) print("User: {:8.2f} ({:5.2f}%)".format(user, user * 100 / (user + system)))
print("System: {:8.2f} ({:5.2f}%)".format(system, system * 100 / (user + system))) print("System: {:8.2f} ({:5.2f}%)".format(system, system * 100 / (user + system)))
print print("")

View File

@ -10,7 +10,7 @@ timing_enter bdev_io_wait
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -1,3 +1,5 @@
#!/usr/bin/env python3
import os import os
import time import time
import sys import sys
@ -47,7 +49,7 @@ def run_case(case, result_list, log_dir_path):
def main(): def main():
if not os.path.exists(CALSOFT_BIN_PATH): if not os.path.exists(CALSOFT_BIN_PATH):
print "The Calsoft test suite is not available on this machine." print("The Calsoft test suite is not available on this machine.")
sys.exit(1) sys.exit(1)
output_dir = sys.argv[1] output_dir = sys.argv[1]
@ -68,7 +70,7 @@ def main():
if not os.path.exists(log_dir): if not os.path.exists(log_dir):
os.mkdir(log_dir) os.mkdir(log_dir)
for case in known_failed_cases: for case in known_failed_cases:
print "Skipping %s. It is known to fail." % (case) print("Skipping %s. It is known to fail." % (case))
case_result_list.append({"Name": case, "Result": "SKIP"}) case_result_list.append({"Name": case, "Result": "SKIP"})
thread_objs = [] thread_objs = []
@ -96,7 +98,7 @@ def main():
else: else:
break break
else: else:
print "Thread timeout" print("Thread timeout")
exit(1) exit(1)
with open(output_file, 'w') as f: with open(output_file, 'w') as f:
json.dump(obj=result, fp=f, indent=2) json.dump(obj=result, fp=f, indent=2)
@ -104,7 +106,7 @@ def main():
failed = 0 failed = 0
for x in case_result_list: for x in case_result_list:
if x["Result"] == "FAIL": if x["Result"] == "FAIL":
print "Test case %s failed." % (x["Name"]) print("Test case %s failed." % (x["Name"]))
failed = 1 failed = 1
exit(failed) exit(failed)

View File

@ -19,8 +19,8 @@ timing_enter calsoft
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
calsoft_py="python $testdir/calsoft.py" calsoft_py="$testdir/calsoft.py"
# Copy the calsoft config file to /usr/local/etc # Copy the calsoft config file to /usr/local/etc
mkdir -p /usr/local/etc mkdir -p /usr/local/etc

View File

@ -58,8 +58,8 @@ timing_enter digests
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -11,7 +11,7 @@ fi
timing_enter ext4test timing_enter ext4test
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -8,7 +8,7 @@ source $rootdir/scripts/common.sh
timing_enter filesystem timing_enter filesystem
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
# Remove lvol bdevs and stores. # Remove lvol bdevs and stores.
function remove_backends() { function remove_backends() {
echo "INFO: Removing lvol bdev" echo "INFO: Removing lvol bdev"

View File

@ -54,8 +54,8 @@ cp $testdir/iscsi.conf.in $testdir/iscsi.conf
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=4096 MALLOC_BLOCK_SIZE=4096
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -10,7 +10,7 @@ timing_enter initiator
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -5,8 +5,8 @@ rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
source $rootdir/test/iscsi_tgt/common.sh source $rootdir/test/iscsi_tgt/common.sh
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
# Namespaces are NOT used here on purpose. This test requires changes to detect # Namespaces are NOT used here on purpose. This test requires changes to detect
# ifc_index for interface that was put into namespace. Needed for add_ip_address. # ifc_index for interface that was put into namespace. Needed for add_ip_address.

View File

@ -17,8 +17,8 @@ else
NUM_LVOL=2 NUM_LVOL=2
fi fi
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -5,8 +5,8 @@ rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
source $rootdir/test/iscsi_tgt/common.sh source $rootdir/test/iscsi_tgt/common.sh
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
CONNECTION_NUMBER=30 CONNECTION_NUMBER=30

View File

@ -15,8 +15,8 @@ if [ -z $NVMF_FIRST_TARGET_IP ]; then
exit 0 exit 0
fi fi
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
NVMF_PORT=4420 NVMF_PORT=4420

View File

@ -12,8 +12,8 @@ PMEM_SIZE=128
PMEM_BLOCK_SIZE=512 PMEM_BLOCK_SIZE=512
TGT_NR=10 TGT_NR=10
PMEM_PER_TGT=1 PMEM_PER_TGT=1
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter iscsi_pmem timing_enter iscsi_pmem

View File

@ -46,8 +46,8 @@ timing_enter qos
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
IOPS_LIMIT=20000 IOPS_LIMIT=20000
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -17,8 +17,8 @@ timing_exit rbd_setup
timing_enter rbd timing_enter rbd
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -12,8 +12,8 @@ timing_enter reset
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
fio_py="python $rootdir/scripts/fio.py" fio_py="$rootdir/scripts/fio.py"
if ! hash sg_reset; then if ! hash sg_reset; then
exit 1 exit 1

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import os import os
@ -61,7 +61,7 @@ class spdk_rpc(object):
def __getattr__(self, name): def __getattr__(self, name):
def call(*args): def call(*args):
cmd = "python {} {}".format(self.rpc_py, name) cmd = "{} {}".format(self.rpc_py, name)
for arg in args: for arg in args:
cmd += " {}".format(arg) cmd += " {}".format(arg)
return check_output(cmd, shell=True) return check_output(cmd, shell=True)
@ -414,7 +414,7 @@ def verify_get_interfaces(rpc_py):
def help_get_interface_ip_list(rpc_py, nic_name): def help_get_interface_ip_list(rpc_py, nic_name):
rpc = spdk_rpc(rpc_py) rpc = spdk_rpc(rpc_py)
nics = json.loads(rpc.get_interfaces()) nics = json.loads(rpc.get_interfaces())
nic = list(filter(lambda x: x["name"] == nic_name, nics)) nic = list([x for x in nics if x["name"] == nic_name])
verify(len(nic) != 0, 1, verify(len(nic) != 0, 1,
"Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics])) "Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics]))
return nic[0]["ip_addr"] return nic[0]["ip_addr"]

View File

@ -18,7 +18,7 @@ fi
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
rpc_py=$rootdir/scripts/rpc.py rpc_py=$rootdir/scripts/rpc.py
rpc_config_py="python $testdir/rpc_config.py" rpc_config_py="$testdir/rpc_config.py"
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python3
import os import os
import sys import sys
@ -59,7 +59,7 @@ def get_bdev_destroy_method(bdev):
destroy_method = None destroy_method = None
if 'method' in bdev: if 'method' in bdev:
construct_method = bdev['method'] construct_method = bdev['method']
if construct_method in destroy_method_map.keys(): if construct_method in list(destroy_method_map.keys()):
destroy_method = destroy_method_map[construct_method] destroy_method = destroy_method_map[construct_method]
return destroy_method return destroy_method
@ -168,7 +168,7 @@ def call_test_cmd(func):
try: try:
func(*args, **kwargs) func(*args, **kwargs)
except JSONRPCException as ex: except JSONRPCException as ex:
print(ex.message) print((ex.message))
exit(1) exit(1)
return rpc_test_cmd return rpc_test_cmd
@ -196,7 +196,7 @@ if __name__ == "__main__":
if config is None: if config is None:
return return
if args.verbose: if args.verbose:
print "Calling clear_%s_subsystem" % args.subsystem print("Calling clear_%s_subsystem" % args.subsystem)
globals()["clear_%s_subsystem" % args.subsystem](args, config) globals()["clear_%s_subsystem" % args.subsystem](args, config)
p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""") p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""")
@ -208,6 +208,6 @@ if __name__ == "__main__":
try: try:
args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.verbose, args.timeout) args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.verbose, args.timeout)
except JSONRPCException as ex: except JSONRPCException as ex:
print(ex.message) print((ex.message))
exit(1) exit(1)
args.func(args) args.func(args)

View File

@ -3,9 +3,9 @@ SPDK_BUILD_DIR=$JSON_DIR/../../
source $JSON_DIR/../common/autotest_common.sh source $JSON_DIR/../common/autotest_common.sh
source $JSON_DIR/../nvmf/common.sh source $JSON_DIR/../nvmf/common.sh
spdk_rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/spdk.sock" spdk_rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/spdk.sock"
spdk_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/spdk.sock" spdk_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/spdk.sock"
initiator_rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/virtio.sock" initiator_rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/virtio.sock"
initiator_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/virtio.sock" initiator_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/virtio.sock"
base_json_config=$JSON_DIR/base_config.json base_json_config=$JSON_DIR/base_config.json
last_json_config=$JSON_DIR/last_config.json last_json_config=$JSON_DIR/last_config.json
@ -205,7 +205,7 @@ function clear_bdev_subsystem_config() {
function test_global_params() { function test_global_params() {
target=$1 target=$1
$rpc_py save_config > $full_config $rpc_py save_config > $full_config
python $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $base_json_config $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $base_json_config
if [ $target == "spdk_tgt" ]; then if [ $target == "spdk_tgt" ]; then
killprocess $spdk_tgt_pid killprocess $spdk_tgt_pid
run_spdk_tgt run_spdk_tgt
@ -218,7 +218,7 @@ function test_global_params() {
fi fi
$rpc_py load_config < $full_config $rpc_py load_config < $full_config
$rpc_py save_config > $full_config $rpc_py save_config > $full_config
python $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $last_json_config $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $last_json_config
json_diff $base_json_config $last_json_config json_diff $base_json_config $last_json_config
rm $base_json_config $last_json_config rm $base_json_config $last_json_config

View File

@ -1,4 +1,5 @@
#!/usr/bin/python #!/usr/bin/env python3
import sys import sys
import json import json
import argparse import argparse

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python #!/usr/bin/env python3
import sys import sys
from test_cases import * from test_cases import *

View File

@ -1,4 +1,5 @@
import json import json
import sys
from uuid import UUID from uuid import UUID
from subprocess import check_output, CalledProcessError from subprocess import check_output, CalledProcessError
@ -9,7 +10,7 @@ class Spdk_Rpc(object):
def __getattr__(self, name): def __getattr__(self, name):
def call(*args): def call(*args):
cmd = "python {} {}".format(self.rpc_py, name) cmd = "{} {} {}".format(sys.executable, self.rpc_py, name)
for arg in args: for arg in args:
cmd += " {}".format(arg) cmd += " {}".format(arg)
try: try:

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import io import io
import time import time
import sys import sys
@ -266,14 +266,14 @@ class TestCases(object):
def get_lvs_size(self, lvs_name="lvs_test"): def get_lvs_size(self, lvs_name="lvs_test"):
lvs = self.c.get_lvol_stores(lvs_name)[0] lvs = self.c.get_lvol_stores(lvs_name)[0]
return int(int(lvs[u'free_clusters'] * lvs['cluster_size']) / MEGABYTE) return int(int(lvs['free_clusters'] * lvs['cluster_size']) / MEGABYTE)
def get_lvs_divided_size(self, split_num, lvs_name="lvs_test"): def get_lvs_divided_size(self, split_num, lvs_name="lvs_test"):
# Actual size of lvol bdevs on creation is rounded up to multiple of cluster size. # Actual size of lvol bdevs on creation is rounded up to multiple of cluster size.
# In order to avoid over provisioning, this function returns # In order to avoid over provisioning, this function returns
# lvol store size in MB divided by split_num - rounded down to multiple of cluster size." # lvol store size in MB divided by split_num - rounded down to multiple of cluster size."
lvs = self.c.get_lvol_stores(lvs_name)[0] lvs = self.c.get_lvol_stores(lvs_name)[0]
return int(int(lvs[u'free_clusters'] / split_num) * lvs['cluster_size'] / MEGABYTE) return int(int(lvs['free_clusters'] / split_num) * lvs['cluster_size'] / MEGABYTE)
def get_lvs_cluster_size(self, lvs_name="lvs_test"): def get_lvs_cluster_size(self, lvs_name="lvs_test"):
lvs = self.c.get_lvol_stores(lvs_name)[0] lvs = self.c.get_lvol_stores(lvs_name)[0]
@ -816,7 +816,7 @@ class TestCases(object):
fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, fail_count = self.c.check_get_lvol_stores(base_name, uuid_store,
self.cluster_size) self.cluster_size)
lvs = self.c.get_lvol_stores() lvs = self.c.get_lvol_stores()
size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE)
# Construct thin provisioned lvol bdev # Construct thin provisioned lvol bdev
uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store,
@ -881,7 +881,7 @@ class TestCases(object):
fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, fail_count = self.c.check_get_lvol_stores(base_name, uuid_store,
self.cluster_size) self.cluster_size)
lvs = self.c.get_lvol_stores() lvs = self.c.get_lvol_stores()
size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE)
# Create lvol bdev, snapshot it, then clone it and then snapshot the clone # Create lvol bdev, snapshot it, then clone it and then snapshot the clone
uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True) uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True)
@ -946,7 +946,7 @@ class TestCases(object):
fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, fail_count = self.c.check_get_lvol_stores(base_name, uuid_store,
self.cluster_size) self.cluster_size)
lvs = self.c.get_lvol_stores() lvs = self.c.get_lvol_stores()
size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE)
# Create lvol bdev, snapshot it, then clone it and then snapshot the clone # Create lvol bdev, snapshot it, then clone it and then snapshot the clone
uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True) uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True)
@ -1046,7 +1046,7 @@ class TestCases(object):
bdev_name = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, bdev_name = self.c.construct_lvol_bdev(uuid_store, self.lbd_name,
bdev_size, thin=True) bdev_size, thin=True)
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_create_lvol = int(lvs[u'free_clusters']) free_clusters_create_lvol = int(lvs['free_clusters'])
# check and save number of free clusters for lvol store # check and save number of free clusters for lvol store
if free_clusters_start != free_clusters_create_lvol: if free_clusters_start != free_clusters_create_lvol:
fail_count += 1 fail_count += 1
@ -1058,7 +1058,7 @@ class TestCases(object):
# write data (lvs cluster size) to created lvol bdev starting from offset 0. # write data (lvs cluster size) to created lvol bdev starting from offset 0.
fail_count += self.run_fio_test("/dev/nbd0", 0, size, "write", "0xcc") fail_count += self.run_fio_test("/dev/nbd0", 0, size, "write", "0xcc")
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_first_fio = int(lvs[u'free_clusters']) free_clusters_first_fio = int(lvs['free_clusters'])
# check that free clusters on lvol store was decremented by 1 # check that free clusters on lvol store was decremented by 1
if free_clusters_start != free_clusters_first_fio + 1: if free_clusters_start != free_clusters_first_fio + 1:
fail_count += 1 fail_count += 1
@ -1070,7 +1070,7 @@ class TestCases(object):
# write data (lvs cluster size) to lvol bdev with offset set to one and half of cluster size # write data (lvs cluster size) to lvol bdev with offset set to one and half of cluster size
fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc") fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc")
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_second_fio = int(lvs[u'free_clusters']) free_clusters_second_fio = int(lvs['free_clusters'])
# check that free clusters on lvol store was decremented by 2 # check that free clusters on lvol store was decremented by 2
if free_clusters_start != free_clusters_second_fio + 3: if free_clusters_start != free_clusters_second_fio + 3:
fail_count += 1 fail_count += 1
@ -1081,7 +1081,7 @@ class TestCases(object):
# write data to lvol bdev to the end of its size # write data to lvol bdev to the end of its size
fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc") fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc")
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_third_fio = int(lvs[u'free_clusters']) free_clusters_third_fio = int(lvs['free_clusters'])
# check that lvol store free clusters number equals to 0 # check that lvol store free clusters number equals to 0
if free_clusters_third_fio != 0: if free_clusters_third_fio != 0:
fail_count += 1 fail_count += 1
@ -1090,7 +1090,7 @@ class TestCases(object):
# destroy thin provisioned lvol bdev # destroy thin provisioned lvol bdev
fail_count += self.c.destroy_lvol_bdev(lvol_bdev['name']) fail_count += self.c.destroy_lvol_bdev(lvol_bdev['name'])
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_end = int(lvs[u'free_clusters']) free_clusters_end = int(lvs['free_clusters'])
# check that saved number of free clusters equals to current free clusters # check that saved number of free clusters equals to current free clusters
if free_clusters_start != free_clusters_end: if free_clusters_start != free_clusters_end:
fail_count += 1 fail_count += 1
@ -1264,7 +1264,7 @@ class TestCases(object):
bdev_size, thin=True) bdev_size, thin=True)
lvs = self.c.get_lvol_stores(self.lvs_name)[0] lvs = self.c.get_lvol_stores(self.lvs_name)[0]
free_clusters_create_lvol = int(lvs[u'free_clusters']) free_clusters_create_lvol = int(lvs['free_clusters'])
if free_clusters_start != free_clusters_create_lvol: if free_clusters_start != free_clusters_create_lvol:
fail_count += 1 fail_count += 1
lvol_bdev0 = self.c.get_lvol_bdev_with_name(bdev_name0) lvol_bdev0 = self.c.get_lvol_bdev_with_name(bdev_name0)

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
NULL_BDEV_SIZE=102400 NULL_BDEV_SIZE=102400
NULL_BLOCK_SIZE=512 NULL_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError
import re import re
@ -37,7 +37,7 @@ filename=%(device)s
def interrupt_handler(signum, frame): def interrupt_handler(signum, frame):
fio.terminate() fio.terminate()
print "FIO terminated" print("FIO terminated")
sys.exit(0) sys.exit(0)
@ -45,11 +45,11 @@ def main():
global fio global fio
if (len(sys.argv) < 5): if (len(sys.argv) < 5):
print "usage:" print("usage:")
print " " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>" print(" " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>")
print "advanced usage:" print("advanced usage:")
print "If you want to run fio with verify, please add verify string after runtime." print("If you want to run fio with verify, please add verify string after runtime.")
print "Currently fio.py only support write rw randwrite randrw with verify enabled." print("Currently fio.py only support write rw randwrite randrw with verify enabled.")
sys.exit(1) sys.exit(1)
io_size = int(sys.argv[1]) io_size = int(sys.argv[1])
@ -62,7 +62,7 @@ def main():
verify = False verify = False
devices = get_target_devices() devices = get_target_devices()
print "Found devices: ", devices print("Found devices: ", devices)
# configure_devices(devices) # configure_devices(devices)
try: try:
@ -73,7 +73,7 @@ def main():
sys.exit(1) sys.exit(1)
device_paths = ['/dev/' + dev for dev in devices] device_paths = ['/dev/' + dev for dev in devices]
print device_paths print(device_paths)
sys.stdout.flush() sys.stdout.flush()
signal.signal(signal.SIGTERM, interrupt_handler) signal.signal(signal.SIGTERM, interrupt_handler)
signal.signal(signal.SIGINT, interrupt_handler) signal.signal(signal.SIGINT, interrupt_handler)
@ -81,13 +81,13 @@ def main():
fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify)) fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify))
fio.stdin.close() fio.stdin.close()
rc = fio.wait() rc = fio.wait()
print "FIO completed with code %d\n" % rc print("FIO completed with code %d\n" % rc)
sys.stdout.flush() sys.stdout.flush()
sys.exit(rc) sys.exit(rc)
def get_target_devices(): def get_target_devices():
output = check_output('lsblk -l -o NAME', shell=True) output = str(check_output('lsblk -l -o NAME', shell=True).decode())
return re.findall("(nvme[0-9]+n[0-9]+)\n", output) return re.findall("(nvme[0-9]+n[0-9]+)\n", output)
@ -100,7 +100,7 @@ def create_fio_config(size, q_depth, devices, test, run_time, verify):
"testtype": test, "runtime": run_time, "verify": verifyfio} "testtype": test, "runtime": run_time, "verify": verifyfio}
for (i, dev) in enumerate(devices): for (i, dev) in enumerate(devices):
fiofile += fio_job_template % {"jobnumber": i, "device": dev} fiofile += fio_job_template % {"jobnumber": i, "device": dev}
return fiofile return fiofile.encode()
def set_device_parameter(devices, filename_template, value): def set_device_parameter(devices, filename_template, value):
@ -123,9 +123,9 @@ def configure_devices(devices):
except IOError: except IOError:
qd = qd - 1 qd = qd - 1
if qd == 0: if qd == 0:
print "Could not set block device queue depths." print("Could not set block device queue depths.")
else: else:
print "Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)) print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)))
set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop") set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop")

View File

@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh source $rootdir/test/nvmf/common.sh
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -6,7 +6,7 @@ source $rootdir/test/common/autotest_common.sh
source $rootdir/scripts/common.sh source $rootdir/scripts/common.sh
source $rootdir/test/nvmf/common.sh source $rootdir/test/nvmf/common.sh
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -11,7 +11,7 @@ LVOL_BDEV_SIZE=10
SUBSYS_NR=2 SUBSYS_NR=2
LVOL_BDEVS_NR=6 LVOL_BDEVS_NR=6
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
function disconnect_nvmf() function disconnect_nvmf()
{ {

View File

@ -9,7 +9,7 @@ MALLOC_BDEV_SIZE=128
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
NVMF_SUBSYS=11 NVMF_SUBSYS=11
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -15,7 +15,7 @@ spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh source $rootdir/test/nvmf/common.sh
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=128 MALLOC_BDEV_SIZE=128
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
rpc_py="python $rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
set -e set -e

View File

@ -3,7 +3,7 @@ set -xe
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))
SPDKCLI_BUILD_DIR=$(readlink -f $testdir/../..) SPDKCLI_BUILD_DIR=$(readlink -f $testdir/../..)
spdkcli_job="python3 $SPDKCLI_BUILD_DIR/test/spdkcli/spdkcli_job.py" spdkcli_job="$SPDKCLI_BUILD_DIR/test/spdkcli/spdkcli_job.py"
. $SPDKCLI_BUILD_DIR/test/common/autotest_common.sh . $SPDKCLI_BUILD_DIR/test/common/autotest_common.sh
function on_error_exit() { function on_error_exit() {
@ -21,7 +21,7 @@ function run_spdk_tgt() {
} }
function check_match() { function check_match() {
python3 $SPDKCLI_BUILD_DIR/scripts/spdkcli.py ll $SPDKCLI_BRANCH > $testdir/match_files/${MATCH_FILE} $SPDKCLI_BUILD_DIR/scripts/spdkcli.py ll $SPDKCLI_BRANCH > $testdir/match_files/${MATCH_FILE}
$SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/${MATCH_FILE}.match $SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/${MATCH_FILE}.match
rm -f $testdir/match_files/${MATCH_FILE} rm -f $testdir/match_files/${MATCH_FILE}
} }

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3.5 #!/usr/bin/env python3
import pexpect import pexpect
import os import os
import sys import sys

View File

@ -48,7 +48,7 @@ check_match
timing_exit spdkcli_check_match timing_exit spdkcli_check_match
timing_enter spdkcli_check_match_details timing_enter spdkcli_check_match_details
python3 $SPDKCLI_BUILD_DIR/scripts/spdkcli.py bdevs/split_disk/Nvme0n1p0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost.test $SPDKCLI_BUILD_DIR/scripts/spdkcli.py bdevs/split_disk/Nvme0n1p0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost.test
$SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/spdkcli_details_vhost.test.match $SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/spdkcli_details_vhost.test.match
rm -f $testdir/match_files/spdkcli_details_vhost.test rm -f $testdir/match_files/spdkcli_details_vhost.test
timing_exit spdkcli_check_match_details timing_exit spdkcli_check_match_details

View File

@ -1052,7 +1052,7 @@ function run_fio()
return 0 return 0
fi fi
python $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/$job_fname \ $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/$job_fname \
$([[ ! -z "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \ $([[ ! -z "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \
--out=$out $json ${fio_disks%,} --out=$out $json ${fio_disks%,}
} }

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python #!/usr/bin/env python3
import os import os
import sys import sys
@ -11,7 +11,7 @@ fio_bin = "fio"
def show_help(): def show_help():
print("""Usage: python run_fio.py [options] [args] print("""Usage: {} run_fio.py [options] [args]
Description: Description:
Run FIO job file 'fio.job' on remote machines. Run FIO job file 'fio.job' on remote machines.
NOTE: The job file must exist on remote machines on '/root/' directory. NOTE: The job file must exist on remote machines on '/root/' directory.
@ -25,7 +25,7 @@ def show_help():
files with test results files with test results
-J, --json Use JSON format for output -J, --json Use JSON format for output
-p, --perf-vmex Enable aggregating statistic for VMEXITS for VMs -p, --perf-vmex Enable aggregating statistic for VMEXITS for VMs
""") """.format(os.path.split(sys.executable)[-1]))
def exec_cmd(cmd, blocking): def exec_cmd(cmd, blocking):
@ -34,7 +34,7 @@ def exec_cmd(cmd, blocking):
stderr=subprocess.STDOUT, stdin=subprocess.PIPE) stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
if blocking is True: if blocking is True:
out, _ = p.communicate() out, _ = p.communicate()
return p.returncode, out return p.returncode, out.decode()
return p return p

View File

@ -91,7 +91,7 @@ notice ""
notice "Setting up VM" notice "Setting up VM"
notice "" notice ""
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
for vm_conf in ${vms[@]}; do for vm_conf in ${vms[@]}; do
IFS=',' read -ra conf <<< "$vm_conf" IFS=',' read -ra conf <<< "$vm_conf"

View File

@ -63,7 +63,7 @@ tmp_attach_job=$BASE_DIR/fio_jobs/fio_attach.job.tmp
tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp
. $BASE_DIR/../common/common.sh . $BASE_DIR/../common/common.sh
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
function print_test_fio_header() { function print_test_fio_header() {
notice "===============" notice "==============="

View File

@ -48,7 +48,7 @@ while getopts 'xh-:' optchar; do
done done
. $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1 . $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR

View File

@ -9,7 +9,7 @@ LVOL_TEST_DIR=$(readlink -f $(dirname $0))
[[ -z "$COMMON_DIR" ]] && COMMON_DIR="$(cd $LVOL_TEST_DIR/../common && pwd)" [[ -z "$COMMON_DIR" ]] && COMMON_DIR="$(cd $LVOL_TEST_DIR/../common && pwd)"
. $COMMON_DIR/common.sh . $COMMON_DIR/common.sh
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
vm_count=1 vm_count=1
max_disks="" max_disks=""

View File

@ -21,7 +21,7 @@ function migration_tc1_configure_vhost()
target_vm=1 target_vm=1
incoming_vm_ctrlr=naa.Malloc0.$incoming_vm incoming_vm_ctrlr=naa.Malloc0.$incoming_vm
target_vm_ctrlr=naa.Malloc0.$target_vm target_vm_ctrlr=naa.Malloc0.$target_vm
rpc="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
trap 'migration_tc1_error_handler; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT trap 'migration_tc1_error_handler; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

View File

@ -76,9 +76,9 @@ function migration_tc2_configure_vhost()
incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
target_vm_ctrlr=naa.VhostScsi0.$target_vm target_vm_ctrlr=naa.VhostScsi0.$target_vm
rpc_nvmf="python $SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/rpc.sock" rpc_nvmf="$SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/rpc.sock"
rpc_0="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" rpc_0="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
rpc_1="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" rpc_1="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
# Default cleanup/error handlers will not shutdown nvmf_tgt app so setup it # Default cleanup/error handlers will not shutdown nvmf_tgt app so setup it
# here to teardown in cleanup function # here to teardown in cleanup function

View File

@ -94,7 +94,7 @@ function host1_cleanup_vhost()
function host1_start_nvmf() function host1_start_nvmf()
{ {
nvmf_dir="$TEST_DIR/nvmf_tgt" nvmf_dir="$TEST_DIR/nvmf_tgt"
rpc_nvmf="python $SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock" rpc_nvmf="$SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock"
notice "Starting nvmf_tgt instance on local server" notice "Starting nvmf_tgt instance on local server"
mkdir -p $nvmf_dir mkdir -p $nvmf_dir
@ -115,7 +115,7 @@ function host1_start_nvmf()
function host1_start_vhost() function host1_start_vhost()
{ {
rpc_0="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" rpc_0="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
notice "Starting vhost0 instance on local server" notice "Starting vhost0 instance on local server"
trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

View File

@ -7,7 +7,7 @@ source $MIGRATION_DIR/autotest.config
incoming_vm=1 incoming_vm=1
target_vm=2 target_vm=2
target_vm_ctrl=naa.VhostScsi0.$target_vm target_vm_ctrl=naa.VhostScsi0.$target_vm
rpc="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" rpc="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"
share_dir=$TEST_DIR/share share_dir=$TEST_DIR/share
function host_2_cleanup_vhost() function host_2_cleanup_vhost()

View File

@ -68,7 +68,7 @@ if [[ $RUN_NIGHTLY -eq 1 ]]; then
spdk_vhost_run --json-path=$NEGATIVE_BASE_DIR spdk_vhost_run --json-path=$NEGATIVE_BASE_DIR
notice "" notice ""
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
# General commands # General commands
notice "Trying to remove nonexistent controller" notice "Trying to remove nonexistent controller"

View File

@ -101,7 +101,7 @@ done
. $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1 . $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1
. $(readlink -e "$(dirname $0)/../../../scripts/common.sh") || exit 1 . $(readlink -e "$(dirname $0)/../../../scripts/common.sh") || exit 1
COMMON_DIR="$(cd $(readlink -f $(dirname $0))/../common && pwd)" COMMON_DIR="$(cd $(readlink -f $(dirname $0))/../common && pwd)"
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
if [[ -n $custom_cpu_cfg ]]; then if [[ -n $custom_cpu_cfg ]]; then
source $custom_cpu_cfg source $custom_cpu_cfg