scripts/nvmf_perf: don't rely on __del__ for destroying objects
As described in the Python documentation: it is not guaranteed that __del__() methods are called for objects that still exist when the interpreter exits. Because of that, class instance objects were not always destroyed after tests. Change-Id: I10f05a480cde37d274d6475efdf47d094cab7cb9 Signed-off-by: Karol Latecki <karol.latecki@intel.com> Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11136 Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com> Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
e7544273c9
commit
c995cc2803
@ -714,7 +714,7 @@ class Initiator(Server):
|
|||||||
def set_local_nic_info_helper(self):
|
def set_local_nic_info_helper(self):
|
||||||
return json.loads(self.exec_cmd(["lshw", "-json"]))
|
return json.loads(self.exec_cmd(["lshw", "-json"]))
|
||||||
|
|
||||||
def __del__(self):
|
def stop(self):
|
||||||
self.ssh_connection.close()
|
self.ssh_connection.close()
|
||||||
|
|
||||||
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
|
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
|
||||||
@ -954,7 +954,7 @@ class KernelTarget(Target):
|
|||||||
if "nvmet_bin" in target_config:
|
if "nvmet_bin" in target_config:
|
||||||
self.nvmet_bin = target_config["nvmet_bin"]
|
self.nvmet_bin = target_config["nvmet_bin"]
|
||||||
|
|
||||||
def __del__(self):
|
def stop(self):
|
||||||
nvmet_command(self.nvmet_bin, "clear")
|
nvmet_command(self.nvmet_bin, "clear")
|
||||||
|
|
||||||
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
|
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
|
||||||
@ -1220,7 +1220,7 @@ class SPDKTarget(Target):
|
|||||||
|
|
||||||
self.spdk_tgt_configure()
|
self.spdk_tgt_configure()
|
||||||
|
|
||||||
def __del__(self):
|
def stop(self):
|
||||||
if self.bpf_proc:
|
if self.bpf_proc:
|
||||||
self.log_print("Stopping BPF Trace script")
|
self.log_print("Stopping BPF Trace script")
|
||||||
self.bpf_proc.terminate()
|
self.bpf_proc.terminate()
|
||||||
@ -1252,9 +1252,6 @@ class KernelInitiator(Initiator):
|
|||||||
if "io_uring" in self.ioengine:
|
if "io_uring" in self.ioengine:
|
||||||
self.extra_params = "--nr-poll-queues=8"
|
self.extra_params = "--nr-poll-queues=8"
|
||||||
|
|
||||||
def __del__(self):
|
|
||||||
self.ssh_connection.close()
|
|
||||||
|
|
||||||
def get_connected_nvme_list(self):
|
def get_connected_nvme_list(self):
|
||||||
json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
|
json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
|
||||||
nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
|
nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
|
||||||
@ -1466,77 +1463,87 @@ if __name__ == "__main__":
|
|||||||
except FileExistsError:
|
except FileExistsError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
target_obj.tgt_start()
|
# TODO: This try block is definitely too large. Need to break this up into separate
|
||||||
|
# logical blocks to reduce size.
|
||||||
for i in initiators:
|
try:
|
||||||
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
|
target_obj.tgt_start()
|
||||||
if i.enable_adq:
|
|
||||||
i.adq_configure_tc()
|
|
||||||
|
|
||||||
# Poor mans threading
|
|
||||||
# Run FIO tests
|
|
||||||
for block_size, io_depth, rw in fio_workloads:
|
|
||||||
threads = []
|
|
||||||
configs = []
|
|
||||||
for i in initiators:
|
|
||||||
if i.mode == "kernel":
|
|
||||||
i.kernel_init_connect()
|
|
||||||
|
|
||||||
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
|
|
||||||
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
|
|
||||||
configs.append(cfg)
|
|
||||||
|
|
||||||
for i, cfg in zip(initiators, configs):
|
|
||||||
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
|
|
||||||
threads.append(t)
|
|
||||||
if target_obj.enable_sar:
|
|
||||||
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
|
|
||||||
sar_file_name = ".".join([sar_file_name, "txt"])
|
|
||||||
t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
|
|
||||||
threads.append(t)
|
|
||||||
|
|
||||||
if target_obj.enable_pcm:
|
|
||||||
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
|
|
||||||
|
|
||||||
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
|
|
||||||
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
|
|
||||||
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
|
|
||||||
|
|
||||||
threads.append(pcm_cpu_t)
|
|
||||||
threads.append(pcm_mem_t)
|
|
||||||
threads.append(pcm_pow_t)
|
|
||||||
|
|
||||||
if target_obj.enable_bandwidth:
|
|
||||||
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
|
|
||||||
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
|
|
||||||
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
|
|
||||||
threads.append(t)
|
|
||||||
|
|
||||||
if target_obj.enable_dpdk_memory:
|
|
||||||
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results))
|
|
||||||
threads.append(t)
|
|
||||||
|
|
||||||
if target_obj.enable_adq:
|
|
||||||
ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
|
|
||||||
threads.append(ethtool_thread)
|
|
||||||
|
|
||||||
for t in threads:
|
|
||||||
t.start()
|
|
||||||
for t in threads:
|
|
||||||
t.join()
|
|
||||||
|
|
||||||
for i in initiators:
|
for i in initiators:
|
||||||
if i.mode == "kernel":
|
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
|
||||||
i.kernel_init_disconnect()
|
if i.enable_adq:
|
||||||
i.copy_result_files(args.results)
|
i.adq_configure_tc()
|
||||||
|
|
||||||
target_obj.restore_governor()
|
# Poor mans threading
|
||||||
target_obj.restore_tuned()
|
# Run FIO tests
|
||||||
target_obj.restore_services()
|
for block_size, io_depth, rw in fio_workloads:
|
||||||
target_obj.restore_sysctl()
|
threads = []
|
||||||
for i in initiators:
|
configs = []
|
||||||
i.restore_governor()
|
for i in initiators:
|
||||||
i.restore_tuned()
|
if i.mode == "kernel":
|
||||||
i.restore_services()
|
i.kernel_init_connect()
|
||||||
i.restore_sysctl()
|
|
||||||
target_obj.parse_results(args.results, args.csv_filename)
|
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
|
||||||
|
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
|
||||||
|
configs.append(cfg)
|
||||||
|
|
||||||
|
for i, cfg in zip(initiators, configs):
|
||||||
|
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
|
||||||
|
threads.append(t)
|
||||||
|
if target_obj.enable_sar:
|
||||||
|
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
|
||||||
|
sar_file_name = ".".join([sar_file_name, "txt"])
|
||||||
|
t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
|
||||||
|
threads.append(t)
|
||||||
|
|
||||||
|
if target_obj.enable_pcm:
|
||||||
|
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
|
||||||
|
|
||||||
|
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
|
||||||
|
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
|
||||||
|
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
|
||||||
|
|
||||||
|
threads.append(pcm_cpu_t)
|
||||||
|
threads.append(pcm_mem_t)
|
||||||
|
threads.append(pcm_pow_t)
|
||||||
|
|
||||||
|
if target_obj.enable_bandwidth:
|
||||||
|
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
|
||||||
|
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
|
||||||
|
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
|
||||||
|
threads.append(t)
|
||||||
|
|
||||||
|
if target_obj.enable_dpdk_memory:
|
||||||
|
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results))
|
||||||
|
threads.append(t)
|
||||||
|
|
||||||
|
if target_obj.enable_adq:
|
||||||
|
ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
|
||||||
|
threads.append(ethtool_thread)
|
||||||
|
|
||||||
|
for t in threads:
|
||||||
|
t.start()
|
||||||
|
for t in threads:
|
||||||
|
t.join()
|
||||||
|
|
||||||
|
for i in initiators:
|
||||||
|
if i.mode == "kernel":
|
||||||
|
i.kernel_init_disconnect()
|
||||||
|
i.copy_result_files(args.results)
|
||||||
|
|
||||||
|
target_obj.restore_governor()
|
||||||
|
target_obj.restore_tuned()
|
||||||
|
target_obj.restore_services()
|
||||||
|
target_obj.restore_sysctl()
|
||||||
|
for i in initiators:
|
||||||
|
i.restore_governor()
|
||||||
|
i.restore_tuned()
|
||||||
|
i.restore_services()
|
||||||
|
i.restore_sysctl()
|
||||||
|
target_obj.parse_results(args.results, args.csv_filename)
|
||||||
|
finally:
|
||||||
|
for i in initiators:
|
||||||
|
try:
|
||||||
|
i.stop()
|
||||||
|
except Exception as err:
|
||||||
|
pass
|
||||||
|
target_obj.stop()
|
||||||
|
Loading…
Reference in New Issue
Block a user