#!/usr/bin/env python3

import os
import re
import sys
import json
import paramiko
import zipfile
import threading
import subprocess
import itertools
import time
import uuid
import rpc
import rpc.client
import pandas as pd
from common import *


class Server:
    def __init__(self, name, username, password, mode, nic_ips, transport):
        self.name = name
        self.mode = mode
        self.username = username
        self.password = password
        self.nic_ips = nic_ips
        self.transport = transport.lower()

        if not re.match("^[A-Za-z0-9]*$", name):
            self.log_print("Please use a name which contains only letters or numbers")
            sys.exit(1)

    def log_print(self, msg):
        print("[%s] %s" % (self.name, msg), flush=True)


class Target(Server):
    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 use_null_block=False, sar_settings=None, pcm_settings=None):

        super(Target, self).__init__(name, username, password, mode, nic_ips, transport)
        self.null_block = bool(use_null_block)
        self.enable_sar = False
        self.enable_pcm_memory = False
        self.enable_pcm = False

        if sar_settings:
            self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = sar_settings

        if pcm_settings:
            self.pcm_dir, self.enable_pcm, self.enable_pcm_memory, self.pcm_delay, self.pcm_interval, self.pcm_count = pcm_settings

        self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))

    def zip_spdk_sources(self, spdk_dir, dest_file):
        self.log_print("Zipping SPDK source directory")
        fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
        for root, directories, files in os.walk(spdk_dir, followlinks=True):
            for file in files:
                fh.write(os.path.relpath(os.path.join(root, file)))
        fh.close()
        self.log_print("Done zipping")

    def read_json_stats(self, file):
        with open(file, "r") as json_data:
            data = json.load(json_data)
            job_pos = 0  # job_pos = 0 because using aggregated results

            # Check if latency is in nano or microseconds to choose correct dict key
            def get_lat_unit(key_prefix, dict_section):
                # key prefix - lat, clat or slat.
                # dict section - portion of json containing latency bucket in question
                # Return dict key to access the bucket and unit as string
                for k, v in dict_section.items():
                    if k.startswith(key_prefix):
                        return k, k.split("_")[1]

            read_iops = float(data["jobs"][job_pos]["read"]["iops"])
            read_bw = float(data["jobs"][job_pos]["read"]["bw"])
            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
            read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
            read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
            read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
            read_p99_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.000000"])

            if "ns" in lat_unit:
                read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
            if "ns" in clat_unit:
                read_p99_lat = read_p99_lat / 1000

            write_iops = float(data["jobs"][job_pos]["write"]["iops"])
            write_bw = float(data["jobs"][job_pos]["write"]["bw"])
            lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
            write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
            write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
            write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
            clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
            write_p99_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.000000"])

            if "ns" in lat_unit:
                write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
            if "ns" in clat_unit:
                write_p99_lat = write_p99_lat / 1000

        return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat, read_p99_lat,
                write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat, write_p99_lat]

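    # For reference, an abbreviated sketch of the fio (3.x) JSON layout that
    # read_json_stats() above expects; keys under "read"/"write" may be lat_ns
    # or lat_us depending on fio version, and all values here are illustrative:
    #
    # {
    #   "jobs": [
    #     {
    #       "read":  {"iops": 1000.0, "bw": 4000.0,
    #                 "lat_ns":  {"min": 800, "max": 90000, "mean": 1500.0},
    #                 "clat_ns": {"percentile": {"99.000000": 3000}}},
    #       "write": (same layout as "read")
    #     }
    #   ]
    # }
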
    def parse_results(self, results_dir, initiator_count=None, run_num=None):
        files = os.listdir(results_dir)
        fio_files = filter(lambda x: ".fio" in x, files)
        json_files = [x for x in files if ".json" in x]

        # Create empty results file
        csv_file = "nvmf_results.csv"
        with open(os.path.join(results_dir, csv_file), "w") as fh:
            header_line = ",".join(["Name",
                                    "read_iops", "read_bw", "read_avg_lat_us",
                                    "read_min_lat_us", "read_max_lat_us", "read_p99_lat_us",
                                    "write_iops", "write_bw", "write_avg_lat_us",
                                    "write_min_lat_us", "write_max_lat_us", "write_p99_lat_us"])
            fh.write(header_line + "\n")
        rows = set()

        for fio_config in fio_files:
            self.log_print("Getting FIO stats for %s" % fio_config)
            job_name, _ = os.path.splitext(fio_config)

            # If "_CPU" exists in name - ignore it
            # Initiators for the same job could have different num_cores parameter
            job_name = re.sub(r"_\d+CPU", "", job_name)
            job_result_files = [x for x in json_files if job_name in x]
            self.log_print("Matching result files for current fio config:")
            for j in job_result_files:
                self.log_print("\t %s" % j)

            # There may have been more than 1 initiator used in test, need to check that
            # Result files are created so that string after last "_" separator is server name
            inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
            inits_avg_results = []
            for i in inits_names:
                self.log_print("\tGetting stats for initiator %s" % i)
                # There may have been more than 1 test run for this job, calculate average results for initiator
                i_results = [x for x in job_result_files if i in x]

                separate_stats = []
                for r in i_results:
                    stats = self.read_json_stats(os.path.join(results_dir, r))
                    separate_stats.append(stats)
                    self.log_print(stats)

                z = [sum(c) for c in zip(*separate_stats)]
                z = [c / len(separate_stats) for c in z]
                inits_avg_results.append(z)

                self.log_print("\tAverage results for initiator %s" % i)
                self.log_print(z)

            # Sum average results of all initiators running this FIO job
            self.log_print("\tTotal results for %s from all initiators" % fio_config)
            for a in inits_avg_results:
                self.log_print(a)
            total = ["{0:.3f}".format(sum(c)) for c in zip(*inits_avg_results)]
            rows.add(",".join([job_name, *total]))

        # Save results to file
        for row in rows:
            with open(os.path.join(results_dir, csv_file), "a") as fh:
                fh.write(row + "\n")
        self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))

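    # Each nvmf_results.csv row produced above is the job name followed by the
    # twelve aggregated values in header order, e.g. (illustrative numbers):
    #   4k_128_randrw_m_70,215000.000,860000.000,0.512,0.100,9.800,1.200,92000.000,368000.000,0.530,0.110,10.100,1.250
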
    def measure_sar(self, results_dir, sar_file_name):
        self.log_print("Waiting %d seconds before measuring SAR stats" % self.sar_delay)
        time.sleep(self.sar_delay)
        out = subprocess.check_output("sar -P ALL %s %s" % (self.sar_interval, self.sar_count), shell=True).decode(encoding="utf-8")
        with open(os.path.join(results_dir, sar_file_name), "w") as fh:
            for line in out.split("\n"):
                if "Average" in line and "CPU" in line:
                    self.log_print("Summary CPU utilization from SAR:")
                    self.log_print(line)
                if "Average" in line and "all" in line:
                    self.log_print(line)
            fh.write(out)

    def measure_pcm_memory(self, results_dir, pcm_file_name):
        time.sleep(self.pcm_delay)
        pcm_memory = subprocess.Popen("%s/pcm-memory.x %s -csv=%s/%s" % (self.pcm_dir, self.pcm_interval,
                                                                         results_dir, pcm_file_name), shell=True)
        time.sleep(self.pcm_count)
        pcm_memory.kill()

    def measure_pcm(self, results_dir, pcm_file_name):
        time.sleep(self.pcm_delay)
        subprocess.run("%s/pcm.x %s -i=%s -csv=%s/%s" % (self.pcm_dir, self.pcm_interval, self.pcm_count,
                                                         results_dir, pcm_file_name), shell=True, check=True)
        # pcm.x emits a CSV with a two-row header, hence header=[0, 1] and the
        # MultiIndex column handling below.
        df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
        df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
        skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
        skt_pcm_file_name = "_".join(["skt", pcm_file_name])
        skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)


class Initiator(Server):
    def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma", cpu_frequency=None,
                 nvmecli_bin="nvme", workspace="/tmp/spdk", cpus_allowed=None, fio_bin="/usr/src/fio/fio"):

        super(Initiator, self).__init__(name, username, password, mode, nic_ips, transport)

        self.ip = ip
        self.spdk_dir = workspace
        self.fio_bin = fio_bin
        self.cpus_allowed = cpus_allowed
        self.cpu_frequency = cpu_frequency
        self.nvmecli_bin = nvmecli_bin
        self.ssh_connection = paramiko.SSHClient()
        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
        self.remote_call("sudo rm -rf %s/nvmf_perf" % self.spdk_dir)
        self.remote_call("mkdir -p %s" % self.spdk_dir)
        self.set_cpu_frequency()

    def __del__(self):
        self.ssh_connection.close()

    def put_file(self, local, remote_dest):
        ftp = self.ssh_connection.open_sftp()
        ftp.put(local, remote_dest)
        ftp.close()

    def get_file(self, remote, local_dest):
        ftp = self.ssh_connection.open_sftp()
        ftp.get(remote, local_dest)
        ftp.close()

    def remote_call(self, cmd):
        stdin, stdout, stderr = self.ssh_connection.exec_command(cmd)
        out = stdout.read().decode(encoding="utf-8")
        err = stderr.read().decode(encoding="utf-8")
        return out, err

    def copy_result_files(self, dest_dir):
        self.log_print("Copying results")

        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

        # Get list of result files from initiator and copy them back to target
        stdout, stderr = self.remote_call("ls %s/nvmf_perf" % self.spdk_dir)
        file_list = stdout.strip().split("\n")

        for file in file_list:
            self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
                          os.path.join(dest_dir, file))
        self.log_print("Done copying results")

    def discover_subsystems(self, address_list, subsys_no):
        num_nvmes = range(0, subsys_no)
        nvme_discover_output = ""
        for ip, subsys_no in itertools.product(address_list, num_nvmes):
            self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
            nvme_discover_cmd = ["sudo",
                                 "%s" % self.nvmecli_bin,
                                 "discover", "-t %s" % self.transport,
                                 "-s %s" % (4420 + subsys_no),
                                 "-a %s" % ip]
            nvme_discover_cmd = " ".join(nvme_discover_cmd)

            stdout, stderr = self.remote_call(nvme_discover_cmd)
            if stdout:
                nvme_discover_output = nvme_discover_output + stdout

        subsystems = re.findall(r'trsvcid:\s(\d+)\s+'  # get svcid number
                                r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+'  # get NQN id
                                r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',  # get IP address
                                nvme_discover_output)  # from nvme discovery output
        subsystems = filter(lambda x: x[-1] in address_list, subsystems)
        subsystems = list(set(subsystems))
        subsystems.sort(key=lambda x: x[1])
        self.log_print("Found matching subsystems on target side:")
        for s in subsystems:
            self.log_print(s)

        return subsystems

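    # Illustrative fragment of "nvme discover" output that the regex in
    # discover_subsystems() matches (addresses and NQNs are examples only):
    #
    #   trsvcid: 4420
    #   subnqn:  nqn.2018-09.io.spdk:cnode1
    #   traddr:  192.168.100.1
    #
    # Each match yields a (trsvcid, subnqn, traddr) tuple.
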
    def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10):
        fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
iodepth={io_depth}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
"""
        if "spdk" in self.mode:
            subsystems = self.discover_subsystems(self.nic_ips, subsys_no)
            bdev_conf = self.gen_spdk_bdev_conf(subsystems)
            self.remote_call("echo '%s' > %s/bdev.conf" % (bdev_conf, self.spdk_dir))
            ioengine = "%s/examples/bdev/fio_plugin/fio_plugin" % self.spdk_dir
            spdk_conf = "spdk_conf=%s/bdev.conf" % self.spdk_dir
        else:
            ioengine = "libaio"
            spdk_conf = ""
            out, err = self.remote_call("lsblk -o NAME -nlp")
            subsystems = [x for x in out.split("\n") if "nvme" in x]

        if self.cpus_allowed is not None:
            self.log_print("Limiting FIO workload execution to specific cores: %s" % self.cpus_allowed)
            cpus_num = 0
            cpus = self.cpus_allowed.split(",")
            for cpu in cpus:
                if "-" in cpu:
                    # "a-b" denotes an inclusive CPU range, e.g. "0-3" is 4 CPUs
                    a, b = cpu.split("-")
                    cpus_num += len(range(int(a), int(b) + 1))
                else:
                    cpus_num += 1
            threads = range(0, cpus_num)
        elif hasattr(self, 'num_cores'):
            self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
            threads = range(0, int(self.num_cores))
        else:
            threads = range(0, len(subsystems))

        if "spdk" in self.mode:
            filename_section = self.gen_fio_filename_conf(subsystems, threads)
        else:
            filename_section = self.gen_fio_filename_conf(threads)

        fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                              rw=rw, rwmixread=rwmixread, block_size=block_size,
                                              io_depth=io_depth, ramp_time=ramp_time, run_time=run_time)
        if num_jobs:
            fio_config = fio_config + "numjobs=%s \n" % num_jobs
        if self.cpus_allowed is not None:
            fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
        fio_config = fio_config + filename_section

        fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
        if hasattr(self, "num_cores"):
            fio_config_filename += "_%sCPU" % self.num_cores
        fio_config_filename += ".fio"

        self.remote_call("mkdir -p %s/nvmf_perf" % self.spdk_dir)
        self.remote_call("echo '%s' > %s/nvmf_perf/%s" % (fio_config, self.spdk_dir, fio_config_filename))
        self.log_print("Created FIO Config:")
        self.log_print(fio_config)

        return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)

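    # A config rendered by gen_fio_config() for an SPDK-mode initiator looks
    # roughly like this (values are illustrative):
    #
    #   [global]
    #   ioengine=/tmp/spdk/examples/bdev/fio_plugin/fio_plugin
    #   spdk_conf=/tmp/spdk/bdev.conf
    #   thread=1
    #   group_reporting=1
    #   direct=1
    #   norandommap=1
    #   rw=randrw
    #   rwmixread=70
    #   bs=4k
    #   iodepth=128
    #   time_based=1
    #   ramp_time=30
    #   runtime=300
    #   numjobs=1
    #   [filename0]
    #   filename=Nvme0n1
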
    def set_cpu_frequency(self):
        if self.cpu_frequency is not None:
            try:
                self.remote_call('sudo cpupower frequency-set -g userspace')
                self.remote_call('sudo cpupower frequency-set -f %s' % self.cpu_frequency)
            except Exception:
                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
                sys.exit()
        else:
            self.log_print("WARNING: you have disabled intel_pstate and are using the default CPU governor.")

    def run_fio(self, fio_config_file, run_num=None):
        job_name, _ = os.path.splitext(fio_config_file)
        self.log_print("Starting FIO run for job: %s" % job_name)
        self.log_print("Using FIO: %s" % self.fio_bin)

        if run_num:
            for i in range(1, run_num + 1):
                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
                cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
                output, error = self.remote_call(cmd)
                self.log_print(output)
                self.log_print(error)
        else:
            output_filename = job_name + "_" + self.name + ".json"
            cmd = "sudo %s %s --output-format=json --output=%s" % (self.fio_bin, fio_config_file, output_filename)
            output, error = self.remote_call(cmd)
            self.log_print(output)
            self.log_print(error)
        self.log_print("FIO run finished. Results in: %s" % output_filename)


class KernelTarget(Target):
    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 use_null_block=False, sar_settings=None, pcm_settings=None,
                 nvmet_bin="nvmetcli", **kwargs):

        super(KernelTarget, self).__init__(name, username, password, mode, nic_ips, transport,
                                           use_null_block, sar_settings, pcm_settings)
        self.nvmet_bin = nvmet_bin

    def __del__(self):
        nvmet_command(self.nvmet_bin, "clear")

    def kernel_tgt_gen_nullblock_conf(self, address):
        nvmet_cfg = {
            "ports": [],
            "hosts": [],
            "subsystems": [],
        }

        nvmet_cfg["subsystems"].append({
            "allowed_hosts": [],
            "attr": {
                "allow_any_host": "1",
                "version": "1.3"
            },
            "namespaces": [
                {
                    "device": {
                        "path": "/dev/nullb0",
                        "uuid": "%s" % uuid.uuid4()
                    },
                    "enable": 1,
                    "nsid": 1
                }
            ],
            "nqn": "nqn.2018-09.io.spdk:cnode1"
        })

        nvmet_cfg["ports"].append({
            "addr": {
                "adrfam": "ipv4",
                "traddr": address,
                "trsvcid": "4420",
                "trtype": "%s" % self.transport,
            },
            "portid": 1,
            "referrals": [],
            "subsystems": ["nqn.2018-09.io.spdk:cnode1"]
        })
        with open("kernel.conf", 'w') as fh:
            fh.write(json.dumps(nvmet_cfg, indent=2))

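    # The JSON written to kernel.conf mirrors the kernel nvmet configfs layout;
    # tgt_start() below feeds it to nvmetcli ("restore kernel.conf") to
    # instantiate the subsystem and port on the target machine.
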
    def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):

        nvmet_cfg = {
            "ports": [],
            "hosts": [],
            "subsystems": [],
        }

        # Split disks between NIC IP's
        disks_per_ip = int(len(nvme_list) / len(address_list))
        disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]

        subsys_no = 1
        port_no = 0
        for ip, chunk in zip(address_list, disk_chunks):
            for disk in chunk:
                nvmet_cfg["subsystems"].append({
                    "allowed_hosts": [],
                    "attr": {
                        "allow_any_host": "1",
                        "version": "1.3"
                    },
                    "namespaces": [
                        {
                            "device": {
                                "path": disk,
                                "uuid": "%s" % uuid.uuid4()
                            },
                            "enable": 1,
                            "nsid": subsys_no
                        }
                    ],
                    "nqn": "nqn.2018-09.io.spdk:cnode%s" % subsys_no
                })

                nvmet_cfg["ports"].append({
                    "addr": {
                        "adrfam": "ipv4",
                        "traddr": ip,
                        "trsvcid": "%s" % (4420 + port_no),
                        "trtype": "%s" % self.transport
                    },
                    "portid": subsys_no,
                    "referrals": [],
                    "subsystems": ["nqn.2018-09.io.spdk:cnode%s" % subsys_no]
                })
                subsys_no += 1
                port_no += 1

        with open("kernel.conf", "w") as fh:
            fh.write(json.dumps(nvmet_cfg, indent=2))

    def tgt_start(self):
        self.log_print("Configuring kernel NVMeOF Target")

        if self.null_block:
            print("Configuring with null block device.")
            if len(self.nic_ips) > 1:
                print("Testing with null block limited to single RDMA NIC.")
                print("Please specify only 1 IP address.")
                exit(1)
            self.subsys_no = 1
            self.kernel_tgt_gen_nullblock_conf(self.nic_ips[0])
        else:
            print("Configuring with NVMe drives.")
            nvme_list = get_nvme_devices()
            self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
            self.subsys_no = len(nvme_list)

        nvmet_command(self.nvmet_bin, "clear")
        nvmet_command(self.nvmet_bin, "restore kernel.conf")
        self.log_print("Done configuring kernel NVMeOF Target")


class SPDKTarget(Target):
    def __init__(self, name, username, password, mode, nic_ips, transport="rdma",
                 use_null_block=False, sar_settings=None, pcm_settings=None,
                 num_shared_buffers=4096, num_cores=1, **kwargs):

        super(SPDKTarget, self).__init__(name, username, password, mode, nic_ips, transport,
                                         use_null_block, sar_settings, pcm_settings)
        self.num_cores = num_cores
        self.num_shared_buffers = num_shared_buffers

    def spdk_tgt_configure(self):
        self.log_print("Configuring SPDK NVMeOF target via RPC")
        numa_list = get_used_numa_nodes()

        # Create RDMA transport layer
        rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport, num_shared_buffers=self.num_shared_buffers)
        self.log_print("SPDK NVMeOF transport layer:")
        rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))

        if self.null_block:
            nvme_section = self.spdk_tgt_add_nullblock()
            subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips, req_num_disks=1)
        else:
            nvme_section = self.spdk_tgt_add_nvme_conf()
            subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips)
        self.log_print("Done configuring SPDK NVMeOF Target")

    def spdk_tgt_add_nullblock(self):
        self.log_print("Adding null block bdev to config via RPC")
        rpc.bdev.bdev_null_create(self.client, 102400, 4096, "Nvme0n1")
        self.log_print("SPDK Bdevs configuration:")
        rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
        self.log_print("Adding NVMe bdevs to config via RPC")

        bdfs = get_nvme_devices_bdf()
        bdfs = [b.replace(":", ".") for b in bdfs]

        if req_num_disks:
            if req_num_disks > len(bdfs):
                self.log_print("ERROR: Requested number of disks is more than the %s available" % len(bdfs))
                sys.exit(1)
            else:
                bdfs = bdfs[0:req_num_disks]

        for i, bdf in enumerate(bdfs):
            rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)

        self.log_print("SPDK Bdevs configuration:")
        rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

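    # Note: get_nvme_devices_bdf() is expected to return PCI BDF addresses such
    # as "0000:01:00.0"; the replace() above rewrites the colons with dots
    # ("0000.01.00.0") before the address is passed as traddr to the RPC.
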
    def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
        self.log_print("Adding subsystems to config")
        if not req_num_disks:
            req_num_disks = get_nvme_devices_count()

        # Distribute bdevs between provided NICs
        num_disks = range(1, req_num_disks + 1)
        disks_per_ip = int(len(num_disks) / len(ips))
        disk_chunks = [num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(ips))]

        # Create subsystems, add bdevs to namespaces, add listeners
        for ip, chunk in zip(ips, disk_chunks):
            for c in chunk:
                nqn = "nqn.2018-09.io.spdk:cnode%s" % c
                serial = "SPDK00%s" % c
                bdev_name = "Nvme%sn1" % (c - 1)
                rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
                                               allow_any_host=True, max_namespaces=8)
                rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)

                rpc.nvmf.nvmf_subsystem_add_listener(self.client, nqn,
                                                     trtype=self.transport,
                                                     traddr=ip,
                                                     trsvcid="4420",
                                                     adrfam="ipv4")

        self.log_print("SPDK NVMeOF subsystem configuration:")
        rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))

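    # The RPC sequence above is roughly what one would issue manually through
    # scripts/rpc.py; exact flags may differ between SPDK versions, so treat
    # this as an illustration only:
    #   ./scripts/rpc.py nvmf_create_subsystem nqn.2018-09.io.spdk:cnode1 -s SPDK001 -a -m 8
    #   ./scripts/rpc.py nvmf_subsystem_add_ns nqn.2018-09.io.spdk:cnode1 Nvme0n1
    #   ./scripts/rpc.py nvmf_subsystem_add_listener nqn.2018-09.io.spdk:cnode1 -t rdma -a 192.168.100.1 -s 4420
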
    def tgt_start(self):
        self.subsys_no = get_nvme_devices_count()
        self.log_print("Starting SPDK NVMeOF Target process")
        nvmf_app_path = os.path.join(self.spdk_dir, "app/nvmf_tgt/nvmf_tgt")
        # -m expects a core mask string; cast in case num_cores was given as a number
        command = " ".join([nvmf_app_path, "-m", str(self.num_cores)])
        proc = subprocess.Popen(command, shell=True)
        self.pid = os.path.join(self.spdk_dir, "nvmf.pid")

        with open(self.pid, "w") as fh:
            fh.write(str(proc.pid))
        self.nvmf_proc = proc
        self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
        self.log_print("Waiting for spdk to initialize...")
        while True:
            if os.path.exists("/var/tmp/spdk.sock"):
                break
            time.sleep(1)
        self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")

        self.spdk_tgt_configure()

    def __del__(self):
        if hasattr(self, "nvmf_proc"):
            try:
                self.nvmf_proc.terminate()
                self.nvmf_proc.wait()
            except Exception as e:
                self.log_print(e)
                self.nvmf_proc.kill()
                self.nvmf_proc.communicate()


class KernelInitiator(Initiator):
    def __init__(self, name, username, password, mode, nic_ips, ip, transport,
                 cpus_allowed=None, fio_bin="/usr/src/fio/fio", **kwargs):

        super(KernelInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
                                              cpus_allowed=cpus_allowed, fio_bin=fio_bin)

        # Use .get() so a missing "extra_params" key does not raise KeyError
        self.extra_params = kwargs.get("extra_params", "")

    def __del__(self):
        self.ssh_connection.close()

    def kernel_init_connect(self, address_list, subsys_no):
        subsystems = self.discover_subsystems(address_list, subsys_no)
        self.log_print("Below connection attempts may result in error messages, this is expected!")
        for subsystem in subsystems:
            self.log_print("Trying to connect %s %s %s" % subsystem)
            self.remote_call("sudo %s connect -t %s -s %s -n %s -a %s %s" % (self.nvmecli_bin,
                                                                             self.transport,
                                                                             *subsystem,
                                                                             self.extra_params))
            time.sleep(2)

    def kernel_init_disconnect(self, address_list, subsys_no):
        subsystems = self.discover_subsystems(address_list, subsys_no)
        for subsystem in subsystems:
            self.remote_call("sudo %s disconnect -n %s" % (self.nvmecli_bin, subsystem[1]))
            time.sleep(1)

    def gen_fio_filename_conf(self, threads):
        out, err = self.remote_call("lsblk -o NAME -nlp")
        nvme_list = [x for x in out.split("\n") if "nvme" in x]

        filename_section = ""
        for i, nvme in enumerate(nvme_list):
            filename_section = "\n".join([filename_section,
                                          "[filename%s]" % i,
                                          "filename=%s" % nvme])

        return filename_section


class SPDKInitiator(Initiator):
    def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma",
                 num_cores=1, cpus_allowed=None, fio_bin="/usr/src/fio/fio", **kwargs):
        super(SPDKInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport,
                                            cpus_allowed=cpus_allowed, fio_bin=fio_bin)

        self.num_cores = num_cores

    def install_spdk(self, local_spdk_zip):
        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
        self.log_print("Copied sources zip from target")
        self.remote_call("unzip -qo /tmp/spdk_drop.zip -d %s" % self.spdk_dir)

        self.log_print("Sources unpacked")
        self.log_print("Using fio binary %s" % self.fio_bin)
        self.remote_call("cd %s; git submodule update --init; ./configure --with-rdma --with-fio=%s;"
                         "make clean; make -j$(($(nproc)*2))" % (self.spdk_dir, os.path.dirname(self.fio_bin)))

        self.log_print("SPDK built")
        self.remote_call("sudo %s/scripts/setup.sh" % self.spdk_dir)

    def gen_spdk_bdev_conf(self, remote_subsystem_list):
        header = "[Nvme]"
        row_template = """  TransportId "trtype:{transport} adrfam:IPv4 traddr:{ip} trsvcid:{svc} subnqn:{nqn}" Nvme{i}"""

        bdev_rows = [row_template.format(transport=self.transport,
                                         svc=x[0],
                                         nqn=x[1],
                                         ip=x[2],
                                         i=i) for i, x in enumerate(remote_subsystem_list)]
        bdev_rows = "\n".join(bdev_rows)
        bdev_section = "\n".join([header, bdev_rows])
        return bdev_section

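    # A rendered section (legacy INI-style bdev config consumed by the SPDK fio
    # plugin; values are illustrative):
    #
    #   [Nvme]
    #     TransportId "trtype:rdma adrfam:IPv4 traddr:192.168.100.1 trsvcid:4420 subnqn:nqn.2018-09.io.spdk:cnode1" Nvme0
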
    def gen_fio_filename_conf(self, subsystems, threads):
        # Callers pass a list of discovered subsystems and a range of threads,
        # so take len() of both when computing the split.
        filename_section = ""
        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
        nvme_per_split = int(len(subsystems) / len(threads))
        remainder = len(subsystems) % len(threads)
        iterator = iter(filenames)
        result = []
        for i in range(len(threads)):
            result.append([])
            for j in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            filename_section = "\n".join([filename_section, header, disks])

        return filename_section

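    # Example distribution: 4 subsystems across 3 threads yields
    # [filename0] Nvme0n1, Nvme1n1 / [filename1] Nvme2n1 / [filename2] Nvme3n1.

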
if __name__ == "__main__":
    spdk_zip_path = "/tmp/spdk.zip"
    target_results_dir = "/tmp/results"

    if len(sys.argv) > 1:
        config_file_path = sys.argv[1]
    else:
        script_full_dir = os.path.dirname(os.path.realpath(__file__))
        config_file_path = os.path.join(script_full_dir, "config.json")

    print("Using config file: %s" % config_file_path)
    with open(config_file_path, "r") as config:
        data = json.load(config)

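    # Illustrative config.json skeleton, inferred from the constructors and the
    # parsing loop below (key names and values are examples, not an exhaustive
    # schema):
    #
    # {
    #   "general": {"username": "root", "password": "...", "transport": "rdma"},
    #   "target": {"mode": "spdk", "nic_ips": ["192.168.100.1"]},
    #   "initiator1": {"mode": "kernel", "nic_ips": ["192.168.100.1"], "ip": "10.0.0.2"},
    #   "fio": {"bs": ["4k"], "qd": [128], "rw": ["randrw"], "rwmixread": 70,
    #           "run_time": 300, "ramp_time": 30, "run_num": 3, "num_jobs": 1}
    # }
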
    initiators = []
    fio_cases = []

    for k, v in data.items():
        if "target" in k:
            if data[k]["mode"] == "spdk":
                target_obj = SPDKTarget(name=k, **data["general"], **v)
            elif data[k]["mode"] == "kernel":
                target_obj = KernelTarget(name=k, **data["general"], **v)
        elif "initiator" in k:
            if data[k]["mode"] == "spdk":
                init_obj = SPDKInitiator(name=k, **data["general"], **v)
            elif data[k]["mode"] == "kernel":
                init_obj = KernelInitiator(name=k, **data["general"], **v)
            initiators.append(init_obj)
        elif "fio" in k:
            fio_workloads = itertools.product(data[k]["bs"],
                                              data[k]["qd"],
                                              data[k]["rw"])

            fio_run_time = data[k]["run_time"]
            fio_ramp_time = data[k]["ramp_time"]
            fio_rw_mix_read = data[k]["rwmixread"]
            fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
        else:
            continue

    # Copy and install SPDK on remote initiators
    target_obj.zip_spdk_sources(target_obj.spdk_dir, spdk_zip_path)
    threads = []
    for i in initiators:
        if i.mode == "spdk":
            t = threading.Thread(target=i.install_spdk, args=(spdk_zip_path,))
            threads.append(t)
            t.start()
    for t in threads:
        t.join()

    target_obj.tgt_start()

    # Poor man's threading
    # Run FIO tests
    for block_size, io_depth, rw in fio_workloads:
        threads = []
        configs = []
        for i in initiators:
            if i.mode == "kernel":
                i.kernel_init_connect(i.nic_ips, target_obj.subsys_no)

            cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
                                   fio_num_jobs, fio_ramp_time, fio_run_time)
            configs.append(cfg)

        for i, cfg in zip(initiators, configs):
            t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
            threads.append(t)
        if target_obj.enable_sar:
            sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
            sar_file_name = ".".join([sar_file_name, "txt"])
            t = threading.Thread(target=target_obj.measure_sar, args=(target_results_dir, sar_file_name))
            threads.append(t)

        if target_obj.enable_pcm:
            pcm_file_name = "_".join(["pcm_cpu", str(block_size), str(rw), str(io_depth)])
            pcm_file_name = ".".join([pcm_file_name, "csv"])
            t = threading.Thread(target=target_obj.measure_pcm, args=(target_results_dir, pcm_file_name,))
            threads.append(t)

        if target_obj.enable_pcm_memory:
            pcm_file_name = "_".join(["pcm_memory", str(block_size), str(rw), str(io_depth)])
            pcm_file_name = ".".join([pcm_file_name, "csv"])
            t = threading.Thread(target=target_obj.measure_pcm_memory, args=(target_results_dir, pcm_file_name,))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

        for i in initiators:
            if i.mode == "kernel":
                i.kernel_init_disconnect(i.nic_ips, target_obj.subsys_no)
            i.copy_result_files(target_results_dir)

    target_obj.parse_results(target_results_dir)