scripts/nvmf_perf: gather additional metrics for all runs
Run additional measurements (PM, SAR, PCM, BWM-NG, DPDK mem) for each
configured test run. For example: until now, if the fio "retry" parameter
was set to 3, fio would run the workload 3 times, but the additional
measurements were taken only for the first run.

Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Change-Id: I5a8aaa8eeb28f2a24f47a41650f9e2bd14a298dd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15380
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent ecbb99e4d7
commit babef5b127
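In short, the per-run loop moves out of the initiators' run_fio() and into
the main workload loop, so the measurement threads are recreated for every
run and their output files are tagged with the run number. A minimal sketch
of the new flow, with illustrative stand-ins (the Initiator class, configs
and measure() below are dummies for this sketch, not the script's real
objects; measure() stands in for measure_sar/measure_pcm/measure_power and
the other helpers):

    import threading

    # Dummy stand-ins so the sketch runs on its own; in the script these
    # are the Initiator objects, their generated fio config files, and the
    # target-side measurement helpers.
    class Initiator:
        def run_fio(self, cfg, run_no):
            print("fio run %s using %s" % (run_no, cfg))

    def measure(run_no):
        print("gathering metrics for run %s" % run_no)

    fio_run_num = 3
    initiators = [Initiator(), Initiator()]
    configs = ["cfg0.fio", "cfg1.fio"]

    for run_no in range(1, fio_run_num + 1):
        # fio threads and measurement threads are created fresh for every
        # run, so each run produces its own set of metrics files.
        threads = [threading.Thread(target=i.run_fio, args=(cfg, run_no))
                   for i, cfg in zip(initiators, configs)]
        threads.append(threading.Thread(target=measure, args=(run_no,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()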
@@ -839,28 +839,20 @@ registerfiles=1
             else:
                 self.log.warning("WARNING: you have disabled intel_pstate and using default cpu governance.")
 
-    def run_fio(self, fio_config_file, run_num=None):
+    def run_fio(self, fio_config_file, run_num=1):
         job_name, _ = os.path.splitext(fio_config_file)
         self.log.info("Starting FIO run for job: %s" % job_name)
         self.log.info("Using FIO: %s" % self.fio_bin)
 
-        if run_num:
-            for i in range(1, run_num + 1):
-                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
-                try:
-                    output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
-                                            "--output=%s" % output_filename, "--eta=never"], True)
-                    self.log.info(output)
-                except CalledProcessError as e:
-                    self.log.error("ERROR: Fio process failed!")
-                    self.log.info(e.stdout)
-        else:
-            output_filename = job_name + "_" + self.name + ".json"
-            output = self.exec_cmd(["sudo", self.fio_bin,
-                                    fio_config_file, "--output-format=json",
-                                    "--output=%s" % output_filename], True)
-            self.log.info(output)
-        self.log.info("FIO run finished. Results in: %s" % output_filename)
+        output_filename = job_name + "_run_" + str(run_num) + "_" + self.name + ".json"
+        try:
+            output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
+                                    "--output=%s" % output_filename, "--eta=never"], True)
+            self.log.info(output)
+            self.log.info("FIO run finished. Results in: %s" % output_filename)
+        except subprocess.CalledProcessError as e:
+            self.log.error("ERROR: Fio process failed!")
+            self.log.error(e.stdout)
 
     def sys_config(self):
         self.log.info("====Kernel release:====")
@@ -1551,9 +1543,7 @@ if __name__ == "__main__":
         # Poor mans threading
         # Run FIO tests
         for block_size, io_depth, rw in fio_workloads:
-            threads = []
             configs = []
-            power_daemon = None
             for i in initiators:
                 i.init_connect()
                 cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
@@ -1561,16 +1551,21 @@ if __name__ == "__main__":
                                        fio_offset, fio_offset_inc)
                 configs.append(cfg)
 
-            for i, cfg in zip(initiators, configs):
-                t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
-                threads.append(t)
-            if target_obj.enable_sar:
-                sar_file_prefix = "%s_%s_%s_sar" % (block_size, rw, io_depth)
-                t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_prefix, fio_ramp_time, fio_run_time))
-                threads.append(t)
-
-            if target_obj.enable_pcm:
-                pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
-
-                pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm,
-                                             args=(args.results, pcm_fnames[0], fio_ramp_time, fio_run_time))
+            for run_no in range(1, fio_run_num+1):
+                threads = []
+                power_daemon = None
+
+                for i, cfg in zip(initiators, configs):
+                    t = threading.Thread(target=i.run_fio, args=(cfg, run_no))
+                    threads.append(t)
+                if target_obj.enable_sar:
+                    sar_file_prefix = "%s_%s_%s_run_%s_sar" % (block_size, rw, io_depth, run_no)
+                    t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_prefix, fio_ramp_time, fio_run_time))
+                    threads.append(t)
+
+                if target_obj.enable_pcm:
+                    pcm_fnames = ["%s_%s_%s_run_%s_%s.csv" % (block_size, rw, io_depth, run_no, x)
+                                  for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
+
+                    pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm,
+                                                 args=(args.results, pcm_fnames[0], fio_ramp_time, fio_run_time))
@@ -1584,7 +1579,7 @@ if __name__ == "__main__":
-                threads.append(pcm_pow_t)
-
-            if target_obj.enable_bw:
-                bandwidth_file_name = "_".join([str(block_size), str(rw), str(io_depth), "bandwidth"])
-                bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
-                t = threading.Thread(target=target_obj.measure_network_bandwidth,
-                                     args=(args.results, bandwidth_file_name, fio_ramp_time, fio_run_time))
+                    threads.append(pcm_pow_t)
+
+                if target_obj.enable_bw:
+                    bandwidth_file_name = "_".join([block_size, rw, str(io_depth), "run_%s" % run_no, "bandwidth"])
+                    bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
+                    t = threading.Thread(target=target_obj.measure_network_bandwidth,
+                                         args=(args.results, bandwidth_file_name, fio_ramp_time, fio_run_time))
@@ -1594,16 +1589,16 @@ if __name__ == "__main__":
-                t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results))
-                threads.append(t)
-
-            if target_obj.enable_adq:
-                ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
-                threads.append(ethtool_thread)
-
-            if target_obj.enable_pm:
-                power_daemon = threading.Thread(target=target_obj.measure_power,
-                                                args=(args.results, "%s_%s_%s" % (block_size, rw, io_depth), script_full_dir,
-                                                      fio_ramp_time, fio_run_time))
-                threads.append(power_daemon)
-
-            for t in threads:
-                t.start()
-            for t in threads:
+                    t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results))
+                    threads.append(t)
+
+                if target_obj.enable_pm:
+                    power_daemon = threading.Thread(target=target_obj.measure_power,
+                                                    args=(args.results, "%s_%s_%s_run_%s" % (block_size, rw, io_depth, run_no),
+                                                          script_full_dir, fio_ramp_time, fio_run_time))
+                    threads.append(power_daemon)
+
+                if target_obj.enable_adq:
+                    ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
+                    threads.append(ethtool_thread)
+
+                for t in threads:
+                    t.start()
+                for t in threads:
@@ -1612,8 +1607,10 @@ if __name__ == "__main__":
             for i in initiators:
                 i.init_disconnect()
                 i.copy_result_files(args.results)
-
-        parse_results(args.results, args.csv_filename)
+        try:
+            parse_results(args.results, args.csv_filename)
+        except Exception:
+            logging.error("There was an error with parsing the results")
     finally:
         for i in initiators:
             try: