diff --git a/CHANGELOG.md b/CHANGELOG.md index 8421606e3..13ddbd69b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,15 @@ surprises for users who may have DPDK_DIR defined for other reasons. Users should just use the "configure" script to specify the DPDK location before building SPDK. +Although we know that many developers still use Python 2, we are officially +switching to Python 3, with the requirement that all new code must also be +valid for Python 2 until its EOL in 2020. + +Invoking the interpreter explicitly is forbidden for executable scripts. +There is no need to use syntax like "python ./scripts/rpc.py". All executable +scripts must contain a proper shebang pointing to the right interpreter. +Scripts without a shebang mustn't be executable. + ## v18.07: ### bdev diff --git a/scripts/fio.py b/scripts/fio.py index 9ec56198c..0868633ee 100755 --- a/scripts/fio.py +++ b/scripts/fio.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError import re @@ -62,7 +62,7 @@ def main(): verify = False devices = get_target_devices() - print("Found devices: ", devices) + print(("Found devices: ", devices)) configure_devices(devices) try: diff --git a/scripts/genconfig.py b/scripts/genconfig.py index 07278abc8..f46096c58 100755 --- a/scripts/genconfig.py +++ b/scripts/genconfig.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import os import re diff --git a/scripts/perf/nvme/run_fio_test.py b/scripts/perf/nvme/run_fio_test.py index 400e22c9f..bbbfaa5b4 100755 --- a/scripts/perf/nvme/run_fio_test.py +++ b/scripts/perf/nvme/run_fio_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # This script runs fio benchmark test on the local nvme device using the SPDK NVMe driver. # Prework: Run script/setup.sh to bind SSDs to SPDK driver. @@ -38,7 +38,7 @@ iter_num = ['1'] def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec): - print "Running Test: IO Size=", io_size_bytes, " QD=", qd, " Mix=", rw_mix, "CPU Mask=", cpu_mask + print("Running Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask)) string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) + "_c_" + str(cpu_mask) + "_run_" + str(run_num) # Call fio @@ -49,7 +49,7 @@ def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec + " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json" output = subprocess.check_output(command, shell=True) - print "Finished Test: IO Size=", io_size_bytes, " QD=", qd, " Mix=", rw_mix, " CPU Mask=", cpu_mask + print("Finished Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask)) return @@ -85,21 +85,21 @@ def parse_results(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_ti write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean']) write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min']) write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max']) - print "%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix", \ "%-10s" % "Workload Type", "%-10s" % "CPU Mask", \ "%-10s" % "Run Time", "%-10s" % "Run Num", \ "%-15s" % "Read IOps", \ "%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")", \ "%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")", \ "%-15s" % "Write IOps", \ "%-10s" % "Write MBps", "%-15s" % "Write Avg. 
Lat(" + lat_units + ")", \ "%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")" - print "%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix, \ "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec, \ "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw, \ "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat, \ "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat, \ "%-15s" % write_min_lat, "%-15s" % write_max_lat + print("%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix", + "%-10s" % "Workload Type", "%-10s" % "CPU Mask", + "%-10s" % "Run Time", "%-10s" % "Run Num", + "%-15s" % "Read IOps", + "%-10s" % "Read MBps", "%-15s" % "Read Avg. Lat(" + lat_units + ")", + "%-15s" % "Read Min. Lat(" + lat_units + ")", "%-15s" % "Read Max. Lat(" + lat_units + ")", + "%-15s" % "Write IOps", + "%-10s" % "Write MBps", "%-15s" % "Write Avg. Lat(" + lat_units + ")", + "%-15s" % "Write Min. Lat(" + lat_units + ")", "%-15s" % "Write Max. Lat(" + lat_units + ")") + print("%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix, + "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec, + "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw, + "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat, + "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat, + "%-15s" % write_min_lat, "%-15s" % write_max_lat) results = results + "," + str(read_iops) + "," + str(read_bw) + "," \ + str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \ + "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \ @@ -128,12 +128,12 @@ def add_filename_to_conf(conf_file_name, bdf): if len(sys.argv) != 4: - print "usage: python ", sys.argv[0], " path_to_fio_conf path_to_ioengine num_ssds" + print("usage: {} path_to_fio_conf path_to_ioengine num_ssds".format(sys.argv[0])) sys.exit() num_ssds = int(sys.argv[3]) if num_ssds > get_nvme_devices_count(): - print "System does not have ", num_ssds, " NVMe SSDs." + print("System does not have {} NVMe SSDs.".format(num_ssds)) sys.exit() host_name = os.uname()[1] diff --git a/scripts/perf/nvme/run_fio_test.sh b/scripts/perf/nvme/run_fio_test.sh index e9381cbc7..454ea555f 100755 --- a/scripts/perf/nvme/run_fio_test.sh +++ b/scripts/perf/nvme/run_fio_test.sh @@ -7,13 +7,13 @@ rootdir=$(readlink -f $testdir/../../..) 
$rootdir/scripts/setup.sh # Run Performance Test with 1 SSD -python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 1 +$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 1 # 2 SSDs test run -python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 2 +$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 2 # 4 SSDs test run -python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 4 +$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 4 # 8 SSDs test run -python $testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 8 +$testdir/run_fio_test.py $testdir/fio_test.conf $rootdir/examples/nvme/fio_plugin/fio_plugin 8 diff --git a/scripts/perf/vhost/run_vhost_test.py b/scripts/perf/vhost/run_vhost_test.py index 61ab7e818..bb1f99851 100644 --- a/scripts/perf/vhost/run_vhost_test.py +++ b/scripts/perf/vhost/run_vhost_test.py @@ -7,7 +7,7 @@ from subprocess import check_call, call, check_output, Popen, PIPE def range_incl(a, b): - return range(a, b + 1) + return list(range(a, b + 1)) def list_spdk_used_cpus(cpus): diff --git a/scripts/pkgdep.sh b/scripts/pkgdep.sh index 87da546b5..1cd375a1b 100755 --- a/scripts/pkgdep.sh +++ b/scripts/pkgdep.sh @@ -37,8 +37,11 @@ if [ -s /etc/redhat-release ]; then yum install -y doxygen mscgen graphviz # Additional dependencies for building pmem based backends yum install -y libpmemblk-devel || true - # Additional dependencies for SPDK CLI - yum install -y python-configshell python-pexpect python3-configshell python3-pexpect + + # Additional dependencies for SPDK CLI - not available in rhel and centos + if ! 
echo "$ID $VERSION_ID" | egrep -q 'rhel 7|centos 7'; then + yum install -y python3-configshell python3-pexpect + fi elif [ -f /etc/debian_version ]; then # Includes Ubuntu, Debian apt-get install -y gcc g++ make libcunit1-dev libaio-dev libssl-dev \ diff --git a/scripts/rpc.py b/scripts/rpc.py index 9d2511907..cba0cc90e 100755 --- a/scripts/rpc.py +++ b/scripts/rpc.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from rpc.client import print_dict, JSONRPCException diff --git a/scripts/spdkcli.py b/scripts/spdkcli.py index e1c16ce86..4412aadfd 100755 --- a/scripts/spdkcli.py +++ b/scripts/spdkcli.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys import argparse from os import getuid diff --git a/scripts/spdkcli/ui_node.py b/scripts/spdkcli/ui_node.py index 3c90bada6..43f6bdfc0 100644 --- a/scripts/spdkcli/ui_node.py +++ b/scripts/spdkcli/ui_node.py @@ -681,7 +681,7 @@ class UIBdevObj(UINode): if self.bdev.aliases: alias = self.bdev.aliases[0] - info = ", ".join(filter(None, [alias, size, in_use])) + info = ", ".join([_f for _f in [alias, size, in_use] if _f]) return info, True @@ -697,9 +697,9 @@ class UIVirtioScsiBdevObj(UIBdevObj): UIBdevObj(bdev, self) def summary(self): - if "socket" in self.bdev.virtio.keys(): + if "socket" in list(self.bdev.virtio.keys()): info = self.bdev.virtio["socket"] - if "pci_address" in self.bdev.virtio.keys(): + if "pci_address" in list(self.bdev.virtio.keys()): info = self.bdev.virtio["pci_address"] return info, True @@ -898,7 +898,7 @@ class UIVhostBlkCtrlObj(UIVhostCtrl): ro = None if self.ctrlr.backend_specific["block"]["readonly"]: ro = "Readonly" - info = ", ".join(filter(None, [self.ctrlr.socket, ro])) + info = ", ".join([_f for _f in [self.ctrlr.socket, ro] if _f]) return info, True diff --git a/scripts/spdkcli/ui_root.py b/scripts/spdkcli/ui_root.py index 455b0a8cb..0cdf2f0f3 100644 --- a/scripts/spdkcli/ui_root.py +++ b/scripts/spdkcli/ui_root.py @@ -72,8 +72,7 @@ class UIRoot(UINode): # For example logical volumes: listing in menu is "Logical_Volume" # (cannot have space), but the product name in SPDK is "Logical Volume" bdev_type = bdev_type.replace("_", " ") - for bdev in filter(lambda x: bdev_type in x["product_name"].lower(), - self.current_bdevs): + for bdev in [x for x in self.current_bdevs if bdev_type in x["product_name"].lower()]: test = Bdev(bdev) yield test @@ -223,8 +222,7 @@ class UIRoot(UINode): def get_vhost_ctrlrs(self, ctrlr_type): if self.is_init: self.list_vhost_ctrls() - for ctrlr in filter(lambda x: ctrlr_type in x["backend_specific"].keys(), - self.current_vhost_ctrls): + for ctrlr in [x for x in self.current_vhost_ctrls if ctrlr_type in list(x["backend_specific"].keys())]: yield VhostCtrlr(ctrlr) @verbose @@ -259,7 +257,7 @@ class Bdev(object): # TODO: Document in docstring parameters which describe bdevs. # TODO: Possible improvement: JSON schema might be used here in future """ - for i in bdev_info.keys(): + for i in list(bdev_info.keys()): setattr(self, i, bdev_info[i]) @@ -271,7 +269,7 @@ class LvolStore(object): # TODO: Document in docstring parameters which describe bdevs. # TODO: Possible improvement: JSON schema might be used here in future """ - for i in lvs_info.keys(): + for i in list(lvs_info.keys()): setattr(self, i, lvs_info[i]) @@ -283,5 +281,5 @@ class VhostCtrlr(object): # TODO: Document in docstring parameters which describe bdevs. 
# TODO: Possible improvement: JSON schema might be used here in future """ - for i in ctrlr_info.keys(): + for i in list(ctrlr_info.keys()): setattr(self, i, ctrlr_info[i]) diff --git a/test/bdev/blockdev.sh b/test/bdev/blockdev.sh index 712aa8c82..e49ddeb9a 100755 --- a/test/bdev/blockdev.sh +++ b/test/bdev/blockdev.sh @@ -5,7 +5,7 @@ set -e testdir=$(readlink -f $(dirname $0)) rootdir=$(readlink -f $testdir/../..) plugindir=$rootdir/examples/bdev/fio_plugin -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" function run_fio() { diff --git a/test/blobfs/rocksdb/postprocess.py b/test/blobfs/rocksdb/postprocess.py index ec18fc56f..1ba8a7302 100755 --- a/test/blobfs/rocksdb/postprocess.py +++ b/test/blobfs/rocksdb/postprocess.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from collections import namedtuple from itertools import islice import operator @@ -31,31 +31,31 @@ with open(sys.argv[1] + "/" + sys.argv[2] + ".perf.txt") as f: for thread in sorted(threads): thread_pct = 0 - print + print("") print("Thread: {:s}".format(thread)) print(" Percent Module") print("============================") - for key, value in sorted(thread_module_samples.items(), key=operator.itemgetter(1), reverse=True): + for key, value in sorted(list(thread_module_samples.items()), key=operator.itemgetter(1), reverse=True): if key.thread == thread: print("{:8.4f} {:20s}".format(float(value) * 100 / total_samples, key.module)) thread_pct += float(value) * 100 / total_samples print("============================") print("{:8.4f} Total".format(thread_pct)) -print +print("") print(" Percent Module Function") print("=================================================================") -for key, value in islice(sorted(function_module_samples.items(), key=operator.itemgetter(1), reverse=True), 100): - print("{:8.4f} {:20s} {:s}".format(float(value) * 100 / total_samples, key.module, key.function)) +for key, value in islice(sorted(list(function_module_samples.items()), key=operator.itemgetter(1), reverse=True), 100): + print(("{:8.4f} {:20s} {:s}".format(float(value) * 100 / total_samples, key.module, key.function))) -print -print +print("") +print("") print(" Percent Module") print("=================================") -for key, value in sorted(module_samples.items(), key=operator.itemgetter(1), reverse=True): +for key, value in sorted(list(module_samples.items()), key=operator.itemgetter(1), reverse=True): print("{:8.4f} {:s}".format(float(value) * 100 / total_samples, key)) -print +print("") with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f: for line in f: if "maxresident" in line: @@ -67,4 +67,4 @@ with open(sys.argv[1] + "/" + sys.argv[2] + "_db_bench.txt") as f: print("User: {:8.2f} ({:5.2f}%)".format(user, user * 100 / (user + system))) print("System: {:8.2f} ({:5.2f}%)".format(system, system * 100 / (user + system))) -print +print("") diff --git a/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh b/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh index 099a40d35..94137507f 100755 --- a/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh +++ b/test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh @@ -10,7 +10,7 @@ timing_enter bdev_io_wait MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/calsoft/calsoft.py b/test/iscsi_tgt/calsoft/calsoft.py index 6a450d338..2970328e6 100755 --- a/test/iscsi_tgt/calsoft/calsoft.py +++ b/test/iscsi_tgt/calsoft/calsoft.py @@ -1,3 +1,5 @@ 
+#!/usr/bin/env python3 + import os import time import sys @@ -47,7 +49,7 @@ def run_case(case, result_list, log_dir_path): def main(): if not os.path.exists(CALSOFT_BIN_PATH): - print "The Calsoft test suite is not available on this machine." + print("The Calsoft test suite is not available on this machine.") sys.exit(1) output_dir = sys.argv[1] @@ -68,7 +70,7 @@ def main(): if not os.path.exists(log_dir): os.mkdir(log_dir) for case in known_failed_cases: - print "Skipping %s. It is known to fail." % (case) + print("Skipping %s. It is known to fail." % (case)) case_result_list.append({"Name": case, "Result": "SKIP"}) thread_objs = [] @@ -96,7 +98,7 @@ def main(): else: break else: - print "Thread timeout" + print("Thread timeout") exit(1) with open(output_file, 'w') as f: json.dump(obj=result, fp=f, indent=2) @@ -104,7 +106,7 @@ def main(): failed = 0 for x in case_result_list: if x["Result"] == "FAIL": - print "Test case %s failed." % (x["Name"]) + print("Test case %s failed." % (x["Name"])) failed = 1 exit(failed) diff --git a/test/iscsi_tgt/calsoft/calsoft.sh b/test/iscsi_tgt/calsoft/calsoft.sh index c0a57d103..1a5c39327 100755 --- a/test/iscsi_tgt/calsoft/calsoft.sh +++ b/test/iscsi_tgt/calsoft/calsoft.sh @@ -19,8 +19,8 @@ timing_enter calsoft MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" -calsoft_py="python $testdir/calsoft.py" +rpc_py="$rootdir/scripts/rpc.py" +calsoft_py="$testdir/calsoft.py" # Copy the calsoft config file to /usr/local/etc mkdir -p /usr/local/etc diff --git a/test/iscsi_tgt/digests/digests.sh b/test/iscsi_tgt/digests/digests.sh index 6e7e9817e..675cf1c1e 100755 --- a/test/iscsi_tgt/digests/digests.sh +++ b/test/iscsi_tgt/digests/digests.sh @@ -58,8 +58,8 @@ timing_enter digests MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/ext4test/ext4test.sh b/test/iscsi_tgt/ext4test/ext4test.sh index 9cef3a97b..37f19b073 100755 --- a/test/iscsi_tgt/ext4test/ext4test.sh +++ b/test/iscsi_tgt/ext4test/ext4test.sh @@ -11,7 +11,7 @@ fi timing_enter ext4test -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/filesystem/filesystem.sh b/test/iscsi_tgt/filesystem/filesystem.sh index b1652e72d..0c530b3b0 100755 --- a/test/iscsi_tgt/filesystem/filesystem.sh +++ b/test/iscsi_tgt/filesystem/filesystem.sh @@ -8,7 +8,7 @@ source $rootdir/scripts/common.sh timing_enter filesystem -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" # Remove lvol bdevs and stores. 
function remove_backends() { echo "INFO: Removing lvol bdev" diff --git a/test/iscsi_tgt/fio/fio.sh b/test/iscsi_tgt/fio/fio.sh index 093d9695c..e2034bd8d 100755 --- a/test/iscsi_tgt/fio/fio.sh +++ b/test/iscsi_tgt/fio/fio.sh @@ -54,8 +54,8 @@ cp $testdir/iscsi.conf.in $testdir/iscsi.conf MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=4096 -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/initiator/initiator.sh b/test/iscsi_tgt/initiator/initiator.sh index 129c4fa97..8f3104a4c 100755 --- a/test/iscsi_tgt/initiator/initiator.sh +++ b/test/iscsi_tgt/initiator/initiator.sh @@ -10,7 +10,7 @@ timing_enter initiator MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/ip_migration/ip_migration.sh b/test/iscsi_tgt/ip_migration/ip_migration.sh index fbed7045d..25332ff8d 100755 --- a/test/iscsi_tgt/ip_migration/ip_migration.sh +++ b/test/iscsi_tgt/ip_migration/ip_migration.sh @@ -5,8 +5,8 @@ rootdir=$(readlink -f $testdir/../../..) source $rootdir/test/common/autotest_common.sh source $rootdir/test/iscsi_tgt/common.sh -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" # Namespaces are NOT used here on purpose. This test requires changes to detect # ifc_index for interface that was put into namespace. Needed for add_ip_address. diff --git a/test/iscsi_tgt/lvol/iscsi_lvol.sh b/test/iscsi_tgt/lvol/iscsi_lvol.sh index 45c8f8efd..bca999cdf 100755 --- a/test/iscsi_tgt/lvol/iscsi_lvol.sh +++ b/test/iscsi_tgt/lvol/iscsi_lvol.sh @@ -17,8 +17,8 @@ else NUM_LVOL=2 fi -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/multiconnection/multiconnection.sh b/test/iscsi_tgt/multiconnection/multiconnection.sh index 5c7f62c59..b793d7518 100755 --- a/test/iscsi_tgt/multiconnection/multiconnection.sh +++ b/test/iscsi_tgt/multiconnection/multiconnection.sh @@ -5,8 +5,8 @@ rootdir=$(readlink -f $testdir/../../..) 
source $rootdir/test/common/autotest_common.sh source $rootdir/test/iscsi_tgt/common.sh -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" CONNECTION_NUMBER=30 diff --git a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh index 4ffb701e8..5e08932ce 100755 --- a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh +++ b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh @@ -15,8 +15,8 @@ if [ -z $NVMF_FIRST_TARGET_IP ]; then exit 0 fi -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" NVMF_PORT=4420 diff --git a/test/iscsi_tgt/pmem/iscsi_pmem.sh b/test/iscsi_tgt/pmem/iscsi_pmem.sh index 2e8f5b5b4..063bb6954 100755 --- a/test/iscsi_tgt/pmem/iscsi_pmem.sh +++ b/test/iscsi_tgt/pmem/iscsi_pmem.sh @@ -12,8 +12,8 @@ PMEM_SIZE=128 PMEM_BLOCK_SIZE=512 TGT_NR=10 PMEM_PER_TGT=1 -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter iscsi_pmem diff --git a/test/iscsi_tgt/qos/qos.sh b/test/iscsi_tgt/qos/qos.sh index 1ddb4009a..a5cd36967 100755 --- a/test/iscsi_tgt/qos/qos.sh +++ b/test/iscsi_tgt/qos/qos.sh @@ -46,8 +46,8 @@ timing_enter qos MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 IOPS_LIMIT=20000 -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/rbd/rbd.sh b/test/iscsi_tgt/rbd/rbd.sh index 3ea1c58af..27d861599 100755 --- a/test/iscsi_tgt/rbd/rbd.sh +++ b/test/iscsi_tgt/rbd/rbd.sh @@ -17,8 +17,8 @@ timing_exit rbd_setup timing_enter rbd -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" timing_enter start_iscsi_tgt diff --git a/test/iscsi_tgt/reset/reset.sh b/test/iscsi_tgt/reset/reset.sh index 304cc0cbe..0e986ac5f 100755 --- a/test/iscsi_tgt/reset/reset.sh +++ b/test/iscsi_tgt/reset/reset.sh @@ -12,8 +12,8 @@ timing_enter reset MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" -fio_py="python $rootdir/scripts/fio.py" +rpc_py="$rootdir/scripts/rpc.py" +fio_py="$rootdir/scripts/fio.py" if ! 
hash sg_reset; then exit 1 diff --git a/test/iscsi_tgt/rpc_config/rpc_config.py b/test/iscsi_tgt/rpc_config/rpc_config.py index 0af4ab39f..03647c47d 100755 --- a/test/iscsi_tgt/rpc_config/rpc_config.py +++ b/test/iscsi_tgt/rpc_config/rpc_config.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import os @@ -61,7 +61,7 @@ class spdk_rpc(object): def __getattr__(self, name): def call(*args): - cmd = "python {} {}".format(self.rpc_py, name) + cmd = "{} {}".format(self.rpc_py, name) for arg in args: cmd += " {}".format(arg) return check_output(cmd, shell=True) @@ -414,7 +414,7 @@ def verify_get_interfaces(rpc_py): def help_get_interface_ip_list(rpc_py, nic_name): rpc = spdk_rpc(rpc_py) nics = json.loads(rpc.get_interfaces()) - nic = list(filter(lambda x: x["name"] == nic_name, nics)) + nic = list([x for x in nics if x["name"] == nic_name]) verify(len(nic) != 0, 1, "Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics])) return nic[0]["ip_addr"] diff --git a/test/iscsi_tgt/rpc_config/rpc_config.sh b/test/iscsi_tgt/rpc_config/rpc_config.sh index a47e10578..ac5c46470 100755 --- a/test/iscsi_tgt/rpc_config/rpc_config.sh +++ b/test/iscsi_tgt/rpc_config/rpc_config.sh @@ -18,7 +18,7 @@ fi MALLOC_BDEV_SIZE=64 rpc_py=$rootdir/scripts/rpc.py -rpc_config_py="python $testdir/rpc_config.py" +rpc_config_py="$testdir/rpc_config.py" timing_enter start_iscsi_tgt diff --git a/test/json_config/clear_config.py b/test/json_config/clear_config.py index bb83f0072..91780dd05 100755 --- a/test/json_config/clear_config.py +++ b/test/json_config/clear_config.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 import os import sys @@ -59,7 +59,7 @@ def get_bdev_destroy_method(bdev): destroy_method = None if 'method' in bdev: construct_method = bdev['method'] - if construct_method in destroy_method_map.keys(): + if construct_method in list(destroy_method_map.keys()): destroy_method = destroy_method_map[construct_method] return destroy_method @@ -168,7 +168,7 @@ def call_test_cmd(func): try: func(*args, **kwargs) except JSONRPCException as ex: - print(ex.message) + print((ex.message)) exit(1) return rpc_test_cmd @@ -196,7 +196,7 @@ if __name__ == "__main__": if config is None: return if args.verbose: - print "Calling clear_%s_subsystem" % args.subsystem + print("Calling clear_%s_subsystem" % args.subsystem) globals()["clear_%s_subsystem" % args.subsystem](args, config) p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""") @@ -208,6 +208,6 @@ if __name__ == "__main__": try: args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.verbose, args.timeout) except JSONRPCException as ex: - print(ex.message) + print((ex.message)) exit(1) args.func(args) diff --git a/test/json_config/common.sh b/test/json_config/common.sh index c0cbfd25a..5d98a0ee8 100644 --- a/test/json_config/common.sh +++ b/test/json_config/common.sh @@ -3,9 +3,9 @@ SPDK_BUILD_DIR=$JSON_DIR/../../ source $JSON_DIR/../common/autotest_common.sh source $JSON_DIR/../nvmf/common.sh -spdk_rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/spdk.sock" +spdk_rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/spdk.sock" spdk_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/spdk.sock" -initiator_rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/virtio.sock" +initiator_rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s /var/tmp/virtio.sock" initiator_clear_config_py="$JSON_DIR/clear_config.py -s /var/tmp/virtio.sock" 
base_json_config=$JSON_DIR/base_config.json last_json_config=$JSON_DIR/last_config.json @@ -205,7 +205,7 @@ function clear_bdev_subsystem_config() { function test_global_params() { target=$1 $rpc_py save_config > $full_config - python $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $base_json_config + $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $base_json_config if [ $target == "spdk_tgt" ]; then killprocess $spdk_tgt_pid run_spdk_tgt @@ -218,7 +218,7 @@ function test_global_params() { fi $rpc_py load_config < $full_config $rpc_py save_config > $full_config - python $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $last_json_config + $JSON_DIR/config_filter.py -method "delete_configs" < $full_config > $last_json_config json_diff $base_json_config $last_json_config rm $base_json_config $last_json_config diff --git a/test/json_config/config_filter.py b/test/json_config/config_filter.py index 3ee59a7d0..e1a3dd264 100755 --- a/test/json_config/config_filter.py +++ b/test/json_config/config_filter.py @@ -1,4 +1,5 @@ -#!/usr/bin/python +#!/usr/bin/env python3 + import sys import json import argparse diff --git a/test/lvol/lvol_test.py b/test/lvol/lvol_test.py index ba613812c..50255f1fe 100755 --- a/test/lvol/lvol_test.py +++ b/test/lvol/lvol_test.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + import sys from test_cases import * diff --git a/test/lvol/rpc_commands_lib.py b/test/lvol/rpc_commands_lib.py index fcd16b387..0857c73d9 100644 --- a/test/lvol/rpc_commands_lib.py +++ b/test/lvol/rpc_commands_lib.py @@ -1,4 +1,5 @@ import json +import sys from uuid import UUID from subprocess import check_output, CalledProcessError @@ -9,7 +10,7 @@ class Spdk_Rpc(object): def __getattr__(self, name): def call(*args): - cmd = "python {} {}".format(self.rpc_py, name) + cmd = "{} {} {}".format(sys.executable, self.rpc_py, name) for arg in args: cmd += " {}".format(arg) try: diff --git a/test/lvol/test_cases.py b/test/lvol/test_cases.py index 794ff1c29..9da564a68 100644 --- a/test/lvol/test_cases.py +++ b/test/lvol/test_cases.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import io import time import sys @@ -266,14 +266,14 @@ class TestCases(object): def get_lvs_size(self, lvs_name="lvs_test"): lvs = self.c.get_lvol_stores(lvs_name)[0] - return int(int(lvs[u'free_clusters'] * lvs['cluster_size']) / MEGABYTE) + return int(int(lvs['free_clusters'] * lvs['cluster_size']) / MEGABYTE) def get_lvs_divided_size(self, split_num, lvs_name="lvs_test"): # Actual size of lvol bdevs on creation is rounded up to multiple of cluster size. # In order to avoid over provisioning, this function returns # lvol store size in MB divided by split_num - rounded down to multiple of cluster size." 
lvs = self.c.get_lvol_stores(lvs_name)[0] - return int(int(lvs[u'free_clusters'] / split_num) * lvs['cluster_size'] / MEGABYTE) + return int(int(lvs['free_clusters'] / split_num) * lvs['cluster_size'] / MEGABYTE) def get_lvs_cluster_size(self, lvs_name="lvs_test"): lvs = self.c.get_lvol_stores(lvs_name)[0] @@ -816,7 +816,7 @@ class TestCases(object): fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, self.cluster_size) lvs = self.c.get_lvol_stores() - size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) + size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) # Construct thin provisioned lvol bdev uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, @@ -881,7 +881,7 @@ class TestCases(object): fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, self.cluster_size) lvs = self.c.get_lvol_stores() - size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) + size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) # Create lvol bdev, snapshot it, then clone it and then snapshot the clone uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True) @@ -946,7 +946,7 @@ class TestCases(object): fail_count = self.c.check_get_lvol_stores(base_name, uuid_store, self.cluster_size) lvs = self.c.get_lvol_stores() - size = int(int(lvs[0][u'free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) + size = int(int(lvs[0]['free_clusters'] * lvs[0]['cluster_size']) / 4 / MEGABYTE) # Create lvol bdev, snapshot it, then clone it and then snapshot the clone uuid_bdev0 = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, size, thin=True) @@ -1046,7 +1046,7 @@ class TestCases(object): bdev_name = self.c.construct_lvol_bdev(uuid_store, self.lbd_name, bdev_size, thin=True) lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_create_lvol = int(lvs[u'free_clusters']) + free_clusters_create_lvol = int(lvs['free_clusters']) # check and save number of free clusters for lvol store if free_clusters_start != free_clusters_create_lvol: fail_count += 1 @@ -1058,7 +1058,7 @@ class TestCases(object): # write data (lvs cluster size) to created lvol bdev starting from offset 0. 
fail_count += self.run_fio_test("/dev/nbd0", 0, size, "write", "0xcc") lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_first_fio = int(lvs[u'free_clusters']) + free_clusters_first_fio = int(lvs['free_clusters']) # check that free clusters on lvol store was decremented by 1 if free_clusters_start != free_clusters_first_fio + 1: fail_count += 1 @@ -1070,7 +1070,7 @@ class TestCases(object): # write data (lvs cluster size) to lvol bdev with offset set to one and half of cluster size fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc") lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_second_fio = int(lvs[u'free_clusters']) + free_clusters_second_fio = int(lvs['free_clusters']) # check that free clusters on lvol store was decremented by 2 if free_clusters_start != free_clusters_second_fio + 3: fail_count += 1 @@ -1081,7 +1081,7 @@ class TestCases(object): # write data to lvol bdev to the end of its size fail_count += self.run_fio_test(nbd_name, offset, size, "write", "0xcc") lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_third_fio = int(lvs[u'free_clusters']) + free_clusters_third_fio = int(lvs['free_clusters']) # check that lvol store free clusters number equals to 0 if free_clusters_third_fio != 0: fail_count += 1 @@ -1090,7 +1090,7 @@ class TestCases(object): # destroy thin provisioned lvol bdev fail_count += self.c.destroy_lvol_bdev(lvol_bdev['name']) lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_end = int(lvs[u'free_clusters']) + free_clusters_end = int(lvs['free_clusters']) # check that saved number of free clusters equals to current free clusters if free_clusters_start != free_clusters_end: fail_count += 1 @@ -1264,7 +1264,7 @@ class TestCases(object): bdev_size, thin=True) lvs = self.c.get_lvol_stores(self.lvs_name)[0] - free_clusters_create_lvol = int(lvs[u'free_clusters']) + free_clusters_create_lvol = int(lvs['free_clusters']) if free_clusters_start != free_clusters_create_lvol: fail_count += 1 lvol_bdev0 = self.c.get_lvol_bdev_with_name(bdev_name0) diff --git a/test/nvmf/bdev_io_wait/bdev_io_wait.sh b/test/nvmf/bdev_io_wait/bdev_io_wait.sh index 6a853f714..817e63fb8 100755 --- a/test/nvmf/bdev_io_wait/bdev_io_wait.sh +++ b/test/nvmf/bdev_io_wait/bdev_io_wait.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/discovery/discovery.sh b/test/nvmf/discovery/discovery.sh index 6dc094031..eb3c340c3 100755 --- a/test/nvmf/discovery/discovery.sh +++ b/test/nvmf/discovery/discovery.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh NULL_BDEV_SIZE=102400 NULL_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/filesystem/filesystem.sh b/test/nvmf/filesystem/filesystem.sh index 268781d65..982b04155 100755 --- a/test/nvmf/filesystem/filesystem.sh +++ b/test/nvmf/filesystem/filesystem.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/fio/fio.sh b/test/nvmf/fio/fio.sh index 4f6869b00..78730efe7 100755 --- a/test/nvmf/fio/fio.sh +++ b/test/nvmf/fio/fio.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git 
a/test/nvmf/fio/nvmf_fio.py b/test/nvmf/fio/nvmf_fio.py index 53c9acc79..6096dd728 100755 --- a/test/nvmf/fio/nvmf_fio.py +++ b/test/nvmf/fio/nvmf_fio.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError import re @@ -37,7 +37,7 @@ filename=%(device)s def interrupt_handler(signum, frame): fio.terminate() - print "FIO terminated" + print("FIO terminated") sys.exit(0) @@ -45,11 +45,11 @@ def main(): global fio if (len(sys.argv) < 5): - print "usage:" - print " " + sys.argv[0] + " " - print "advanced usage:" - print "If you want to run fio with verify, please add verify string after runtime." - print "Currently fio.py only support write rw randwrite randrw with verify enabled." + print("usage:") + print(" " + sys.argv[0] + " ") + print("advanced usage:") + print("If you want to run fio with verify, please add verify string after runtime.") + print("Currently fio.py only support write rw randwrite randrw with verify enabled.") sys.exit(1) io_size = int(sys.argv[1]) @@ -62,7 +62,7 @@ def main(): verify = False devices = get_target_devices() - print "Found devices: ", devices + print("Found devices: ", devices) # configure_devices(devices) try: @@ -73,7 +73,7 @@ def main(): sys.exit(1) device_paths = ['/dev/' + dev for dev in devices] - print device_paths + print(device_paths) sys.stdout.flush() signal.signal(signal.SIGTERM, interrupt_handler) signal.signal(signal.SIGINT, interrupt_handler) @@ -81,13 +81,13 @@ def main(): fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify)) fio.stdin.close() rc = fio.wait() - print "FIO completed with code %d\n" % rc + print("FIO completed with code %d\n" % rc) sys.stdout.flush() sys.exit(rc) def get_target_devices(): - output = check_output('lsblk -l -o NAME', shell=True) + output = str(check_output('lsblk -l -o NAME', shell=True).decode()) return re.findall("(nvme[0-9]+n[0-9]+)\n", output) @@ -100,7 +100,7 @@ def create_fio_config(size, q_depth, devices, test, run_time, verify): "testtype": test, "runtime": run_time, "verify": verifyfio} for (i, dev) in enumerate(devices): fiofile += fio_job_template % {"jobnumber": i, "device": dev} - return fiofile + return fiofile.encode() def set_device_parameter(devices, filename_template, value): @@ -123,9 +123,9 @@ def configure_devices(devices): except IOError: qd = qd - 1 if qd == 0: - print "Could not set block device queue depths." + print("Could not set block device queue depths.") else: - print "Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)) + print("Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd))) set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop") diff --git a/test/nvmf/host/aer.sh b/test/nvmf/host/aer.sh index 56b8955b2..98fabdace 100755 --- a/test/nvmf/host/aer.sh +++ b/test/nvmf/host/aer.sh @@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../../..) 
source $rootdir/test/common/autotest_common.sh source $rootdir/test/nvmf/common.sh -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/host/bdevperf.sh b/test/nvmf/host/bdevperf.sh index 5c8bf88e5..0c970101b 100755 --- a/test/nvmf/host/bdevperf.sh +++ b/test/nvmf/host/bdevperf.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/host/fio.sh b/test/nvmf/host/fio.sh index b3784eea5..e4391fddb 100755 --- a/test/nvmf/host/fio.sh +++ b/test/nvmf/host/fio.sh @@ -6,7 +6,7 @@ source $rootdir/test/common/autotest_common.sh source $rootdir/scripts/common.sh source $rootdir/test/nvmf/common.sh -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/host/identify.sh b/test/nvmf/host/identify.sh index 469bc0a1b..c2ac51a84 100755 --- a/test/nvmf/host/identify.sh +++ b/test/nvmf/host/identify.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/host/perf.sh b/test/nvmf/host/perf.sh index 7bb528ab0..89da45fe9 100755 --- a/test/nvmf/host/perf.sh +++ b/test/nvmf/host/perf.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/lvol/nvmf_lvol.sh b/test/nvmf/lvol/nvmf_lvol.sh index 17006236d..b69d93a68 100755 --- a/test/nvmf/lvol/nvmf_lvol.sh +++ b/test/nvmf/lvol/nvmf_lvol.sh @@ -11,7 +11,7 @@ LVOL_BDEV_SIZE=10 SUBSYS_NR=2 LVOL_BDEVS_NR=6 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" function disconnect_nvmf() { diff --git a/test/nvmf/multiconnection/multiconnection.sh b/test/nvmf/multiconnection/multiconnection.sh index dbe4b4a49..d5781d96b 100755 --- a/test/nvmf/multiconnection/multiconnection.sh +++ b/test/nvmf/multiconnection/multiconnection.sh @@ -9,7 +9,7 @@ MALLOC_BDEV_SIZE=128 MALLOC_BLOCK_SIZE=512 NVMF_SUBSYS=11 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/nvme_cli/nvme_cli.sh b/test/nvmf/nvme_cli/nvme_cli.sh index 30a7ee95d..8a72f6434 100755 --- a/test/nvmf/nvme_cli/nvme_cli.sh +++ b/test/nvmf/nvme_cli/nvme_cli.sh @@ -15,7 +15,7 @@ spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli" MALLOC_BDEV_SIZE=64 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/rpc/rpc.sh b/test/nvmf/rpc/rpc.sh index eaf79a018..690261dee 100755 --- a/test/nvmf/rpc/rpc.sh +++ b/test/nvmf/rpc/rpc.sh @@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../../..) 
source $rootdir/test/common/autotest_common.sh source $rootdir/test/nvmf/common.sh -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/nvmf/shutdown/shutdown.sh b/test/nvmf/shutdown/shutdown.sh index 05abe8062..f4a95090c 100755 --- a/test/nvmf/shutdown/shutdown.sh +++ b/test/nvmf/shutdown/shutdown.sh @@ -8,7 +8,7 @@ source $rootdir/test/nvmf/common.sh MALLOC_BDEV_SIZE=128 MALLOC_BLOCK_SIZE=512 -rpc_py="python $rootdir/scripts/rpc.py" +rpc_py="$rootdir/scripts/rpc.py" set -e diff --git a/test/spdkcli/common.sh b/test/spdkcli/common.sh index 4c40aeeff..b55ba7051 100644 --- a/test/spdkcli/common.sh +++ b/test/spdkcli/common.sh @@ -3,7 +3,7 @@ set -xe testdir=$(readlink -f $(dirname $0)) SPDKCLI_BUILD_DIR=$(readlink -f $testdir/../..) -spdkcli_job="python3 $SPDKCLI_BUILD_DIR/test/spdkcli/spdkcli_job.py" +spdkcli_job="$SPDKCLI_BUILD_DIR/test/spdkcli/spdkcli_job.py" . $SPDKCLI_BUILD_DIR/test/common/autotest_common.sh function on_error_exit() { @@ -21,7 +21,7 @@ function run_spdk_tgt() { } function check_match() { - python3 $SPDKCLI_BUILD_DIR/scripts/spdkcli.py ll $SPDKCLI_BRANCH > $testdir/match_files/${MATCH_FILE} + $SPDKCLI_BUILD_DIR/scripts/spdkcli.py ll $SPDKCLI_BRANCH > $testdir/match_files/${MATCH_FILE} $SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/${MATCH_FILE}.match rm -f $testdir/match_files/${MATCH_FILE} } diff --git a/test/spdkcli/spdkcli_job.py b/test/spdkcli/spdkcli_job.py index 0751b955f..a2677f6a5 100755 --- a/test/spdkcli/spdkcli_job.py +++ b/test/spdkcli/spdkcli_job.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3.5 +#!/usr/bin/env python3 import pexpect import os import sys diff --git a/test/spdkcli/vhost.sh b/test/spdkcli/vhost.sh index 909bfdf4c..652b00913 100755 --- a/test/spdkcli/vhost.sh +++ b/test/spdkcli/vhost.sh @@ -48,7 +48,7 @@ check_match timing_exit spdkcli_check_match timing_enter spdkcli_check_match_details -python3 $SPDKCLI_BUILD_DIR/scripts/spdkcli.py bdevs/split_disk/Nvme0n1p0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost.test +$SPDKCLI_BUILD_DIR/scripts/spdkcli.py bdevs/split_disk/Nvme0n1p0 show_details | jq -r -S '.' > $testdir/match_files/spdkcli_details_vhost.test $SPDKCLI_BUILD_DIR/test/app/match/match -v $testdir/match_files/spdkcli_details_vhost.test.match rm -f $testdir/match_files/spdkcli_details_vhost.test timing_exit spdkcli_check_match_details diff --git a/test/vhost/common/common.sh b/test/vhost/common/common.sh index 1ffc1f84e..69798e588 100644 --- a/test/vhost/common/common.sh +++ b/test/vhost/common/common.sh @@ -1052,7 +1052,7 @@ function run_fio() return 0 fi - python $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/$job_fname \ + $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/$job_fname \ $([[ ! -z "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \ --out=$out $json ${fio_disks%,} } diff --git a/test/vhost/common/run_fio.py b/test/vhost/common/run_fio.py index 2bf897edb..0760b018c 100755 --- a/test/vhost/common/run_fio.py +++ b/test/vhost/common/run_fio.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import os import sys @@ -11,7 +11,7 @@ fio_bin = "fio" def show_help(): - print("""Usage: python run_fio.py [options] [args] + print("""Usage: {} run_fio.py [options] [args] Description: Run FIO job file 'fio.job' on remote machines. NOTE: The job file must exist on remote machines on '/root/' directory. 
@@ -25,7 +25,7 @@ def show_help(): files with test results -J, --json Use JSON format for output -p, --perf-vmex Enable aggregating statistic for VMEXITS for VMs - """) + """.format(os.path.split(sys.executable)[-1])) def exec_cmd(cmd, blocking): @@ -34,7 +34,7 @@ def exec_cmd(cmd, blocking): stderr=subprocess.STDOUT, stdin=subprocess.PIPE) if blocking is True: out, _ = p.communicate() - return p.returncode, out + return p.returncode, out.decode() return p diff --git a/test/vhost/fiotest/autotest.sh b/test/vhost/fiotest/autotest.sh index 2a87c8e95..466ac1410 100755 --- a/test/vhost/fiotest/autotest.sh +++ b/test/vhost/fiotest/autotest.sh @@ -91,7 +91,7 @@ notice "" notice "Setting up VM" notice "" -rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" +rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" for vm_conf in ${vms[@]}; do IFS=',' read -ra conf <<< "$vm_conf" diff --git a/test/vhost/hotplug/common.sh b/test/vhost/hotplug/common.sh index ea9521159..a94b06cf8 100644 --- a/test/vhost/hotplug/common.sh +++ b/test/vhost/hotplug/common.sh @@ -63,7 +63,7 @@ tmp_attach_job=$BASE_DIR/fio_jobs/fio_attach.job.tmp tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp . $BASE_DIR/../common/common.sh -rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" +rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" function print_test_fio_header() { notice "===============" diff --git a/test/vhost/integrity/integrity_start.sh b/test/vhost/integrity/integrity_start.sh index 46faffb1f..a9899e9f1 100755 --- a/test/vhost/integrity/integrity_start.sh +++ b/test/vhost/integrity/integrity_start.sh @@ -48,7 +48,7 @@ while getopts 'xh-:' optchar; do done . $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1 -rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" +rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR diff --git a/test/vhost/lvol/lvol_test.sh b/test/vhost/lvol/lvol_test.sh index 30a0b8b4a..5190b5f28 100755 --- a/test/vhost/lvol/lvol_test.sh +++ b/test/vhost/lvol/lvol_test.sh @@ -9,7 +9,7 @@ LVOL_TEST_DIR=$(readlink -f $(dirname $0)) [[ -z "$COMMON_DIR" ]] && COMMON_DIR="$(cd $LVOL_TEST_DIR/../common && pwd)" . 
$COMMON_DIR/common.sh -rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" +rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" vm_count=1 max_disks="" diff --git a/test/vhost/migration/migration-tc1.sh b/test/vhost/migration/migration-tc1.sh index 9dfcdbc99..ec89545d1 100644 --- a/test/vhost/migration/migration-tc1.sh +++ b/test/vhost/migration/migration-tc1.sh @@ -21,7 +21,7 @@ function migration_tc1_configure_vhost() target_vm=1 incoming_vm_ctrlr=naa.Malloc0.$incoming_vm target_vm_ctrlr=naa.Malloc0.$target_vm - rpc="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" + rpc="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" trap 'migration_tc1_error_handler; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT diff --git a/test/vhost/migration/migration-tc2.sh b/test/vhost/migration/migration-tc2.sh index f254b960a..0f9977079 100644 --- a/test/vhost/migration/migration-tc2.sh +++ b/test/vhost/migration/migration-tc2.sh @@ -76,9 +76,9 @@ function migration_tc2_configure_vhost() incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm target_vm_ctrlr=naa.VhostScsi0.$target_vm - rpc_nvmf="python $SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/rpc.sock" - rpc_0="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" - rpc_1="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" + rpc_nvmf="$SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/rpc.sock" + rpc_0="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" + rpc_1="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" # Default cleanup/error handlers will not shutdown nvmf_tgt app so setup it # here to teardown in cleanup function diff --git a/test/vhost/migration/migration-tc3a.sh b/test/vhost/migration/migration-tc3a.sh index 5ecfa3fd4..9795e3b8a 100644 --- a/test/vhost/migration/migration-tc3a.sh +++ b/test/vhost/migration/migration-tc3a.sh @@ -94,7 +94,7 @@ function host1_cleanup_vhost() function host1_start_nvmf() { nvmf_dir="$TEST_DIR/nvmf_tgt" - rpc_nvmf="python $SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock" + rpc_nvmf="$SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock" notice "Starting nvmf_tgt instance on local server" mkdir -p $nvmf_dir @@ -115,7 +115,7 @@ function host1_start_nvmf() function host1_start_vhost() { - rpc_0="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" + rpc_0="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock" notice "Starting vhost0 instance on local server" trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT diff --git a/test/vhost/migration/migration-tc3b.sh b/test/vhost/migration/migration-tc3b.sh index 22f29c0bc..babba0dca 100755 --- a/test/vhost/migration/migration-tc3b.sh +++ b/test/vhost/migration/migration-tc3b.sh @@ -7,7 +7,7 @@ source $MIGRATION_DIR/autotest.config incoming_vm=1 target_vm=2 target_vm_ctrl=naa.VhostScsi0.$target_vm -rpc="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" +rpc="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock" share_dir=$TEST_DIR/share function host_2_cleanup_vhost() diff --git a/test/vhost/other/negative.sh b/test/vhost/other/negative.sh index 679c76971..5728a2838 100755 --- a/test/vhost/other/negative.sh +++ b/test/vhost/other/negative.sh @@ -68,7 +68,7 @@ if [[ $RUN_NIGHTLY -eq 1 ]]; then spdk_vhost_run --json-path=$NEGATIVE_BASE_DIR notice "" - rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" + rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py 
-s $(get_vhost_dir)/rpc.sock" # General commands notice "Trying to remove nonexistent controller" diff --git a/test/vhost/perf_bench/vhost_perf.sh b/test/vhost/perf_bench/vhost_perf.sh index c8849de25..3789c8f1e 100755 --- a/test/vhost/perf_bench/vhost_perf.sh +++ b/test/vhost/perf_bench/vhost_perf.sh @@ -101,7 +101,7 @@ done . $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1 . $(readlink -e "$(dirname $0)/../../../scripts/common.sh") || exit 1 COMMON_DIR="$(cd $(readlink -f $(dirname $0))/../common && pwd)" -rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" +rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock" if [[ -n $custom_cpu_cfg ]]; then source $custom_cpu_cfg