2018-10-14 19:51:14 +00:00
|
|
|
#!/usr/bin/env python3
|
2022-11-02 15:19:59 +00:00
|
|
|
# SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
# Copyright (C) 2018 Intel Corporation
|
|
|
|
# All rights reserved.
|
|
|
|
#
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import sys
|
2021-07-02 07:27:05 +00:00
|
|
|
import argparse
|
2018-10-14 19:51:14 +00:00
|
|
|
import json
|
2022-09-27 14:44:20 +00:00
|
|
|
import logging
|
2018-10-14 19:51:14 +00:00
|
|
|
import zipfile
|
|
|
|
import threading
|
|
|
|
import subprocess
|
|
|
|
import itertools
|
2021-02-04 11:47:39 +00:00
|
|
|
import configparser
|
2018-10-14 19:51:14 +00:00
|
|
|
import time
|
|
|
|
import uuid
|
2021-01-28 13:04:03 +00:00
|
|
|
|
|
|
|
import paramiko
|
|
|
|
import pandas as pd
|
2018-10-14 19:51:14 +00:00
|
|
|
from common import *
|
2022-11-21 09:37:56 +00:00
|
|
|
from subprocess import CalledProcessError
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2021-09-21 13:21:31 +00:00
|
|
|
sys.path.append(os.path.dirname(__file__) + '/../../../python')
|
|
|
|
|
|
|
|
import spdk.rpc as rpc # noqa
|
|
|
|
import spdk.rpc.client as rpc_client # noqa
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
class Server:
    """Base class for a machine taking part in an NVMe-oF benchmark run.

    Holds connection/credential info from the "general" config section plus
    per-server settings (NICs, mode, ADQ, tuned profile), and implements the
    host-tuning steps shared by Target and Initiator subclasses. Every tuning
    step records the previous state in a ``*_restore_*`` attribute so that
    restore_settings() can undo it after the test.

    Subclasses must override exec_cmd(); the base implementation is a no-op
    stub that returns an empty string.
    """

    def __init__(self, name, general_config, server_config):
        # Identity and credentials used for reaching this host.
        self.name = name
        self.username = general_config["username"]
        self.password = general_config["password"]
        # NVMe-oF transport (e.g. "tcp", "rdma"); lower-cased for module names.
        self.transport = general_config["transport"].lower()
        self.nic_ips = server_config["nic_ips"]
        # "kernel" or "spdk" - selects which NVMe-oF stack is exercised.
        self.mode = server_config["mode"]

        self.log = logging.getLogger(self.name)

        # Default location of Mellanox IRQ-affinity helper scripts; may be
        # overridden per server in the config file.
        self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
        if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
            self.irq_scripts_dir = server_config["irq_scripts_dir"]

        self.local_nic_info = []
        # Cached `ip -j address show` output; lazily filled by get_nic_name_by_ip().
        self._nics_json_obj = {}
        # State captured before tuning, used by the restore_*() methods.
        self.svc_restore_dict = {}
        self.sysctl_restore_dict = {}
        self.tuned_restore_dict = {}
        self.governor_restore = ""
        self.tuned_profile = ""

        # ADQ (Application Device Queues) is disabled unless requested.
        self.enable_adq = False
        self.adq_priority = None
        if "adq_enable" in server_config and server_config["adq_enable"]:
            self.enable_adq = server_config["adq_enable"]
            self.adq_priority = 1

        if "tuned_profile" in server_config:
            self.tuned_profile = server_config["tuned_profile"]

        # Server names are used in log/file naming, so restrict the charset.
        if not re.match("^[A-Za-z0-9]*$", name):
            self.log.info("Please use a name which contains only letters or numbers")
            sys.exit(1)

    @staticmethod
    def get_uncommented_lines(lines):
        """Return only the non-empty lines that do not start with '#'."""
        return [line for line in lines if line and not line.startswith('#')]

    def get_nic_name_by_ip(self, ip):
        """Return the interface name ("ifname") owning the given IP address.

        Caches `ip -j address show` output on first use; interfaces with no
        addresses are filtered out. Returns None implicitly if no match.
        """
        if not self._nics_json_obj:
            nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
            self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
        for nic in self._nics_json_obj:
            for addr in nic["addr_info"]:
                # NOTE(review): substring containment, not equality - e.g.
                # "10.0.0.1" also matches an interface holding "10.0.0.11".
                # Confirm whether exact match was intended.
                if ip in addr["local"]:
                    return nic["ifname"]

    def set_local_nic_info_helper(self):
        # Hook for subclasses: return NIC/PCI info for set_local_nic_info().
        pass

    def set_local_nic_info(self, pci_info):
        """Populate self.local_nic_info with network nodes from lshw-style JSON."""
        def extract_network_elements(json_obj):
            # Recursively walk the lshw JSON tree collecting nodes whose
            # "class" contains "network".
            nic_list = []
            if isinstance(json_obj, list):
                for x in json_obj:
                    nic_list.extend(extract_network_elements(x))
            elif isinstance(json_obj, dict):
                if "children" in json_obj:
                    nic_list.extend(extract_network_elements(json_obj["children"]))
                if "class" in json_obj.keys() and "network" in json_obj["class"]:
                    nic_list.append(json_obj)
            return nic_list

        self.local_nic_info = extract_network_elements(pci_info)

    def get_nic_numa_node(self, nic_name):
        """Return the NUMA node number the given NIC is attached to."""
        return int(self.exec_cmd(["cat", "/sys/class/net/%s/device/numa_node" % nic_name]))

    def get_numa_cpu_map(self):
        """Return {numa_node: [cpu, ...]} built from `lscpu` JSON output."""
        numa_cpu_json_obj = json.loads(self.exec_cmd(["lscpu", "-b", "-e=NODE,CPU", "-J"]))
        numa_cpu_json_map = {}

        for cpu in numa_cpu_json_obj["cpus"]:
            cpu_num = int(cpu["cpu"])
            numa_node = int(cpu["node"])
            numa_cpu_json_map.setdefault(numa_node, [])
            numa_cpu_json_map[numa_node].append(cpu_num)

        return numa_cpu_json_map

    # pylint: disable=R0201
    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
        # Stub; overridden by subclasses (local subprocess or SSH execution).
        return ""

    def configure_system(self):
        """Apply all host tuning steps in the required order."""
        self.load_drivers()
        self.configure_services()
        self.configure_sysctl()
        self.configure_tuned()
        self.configure_cpu_governor()
        self.configure_irq_affinity()

    def load_drivers(self):
        """Load NVMe-oF kernel modules for the configured transport."""
        self.log.info("Loading drivers")
        self.exec_cmd(["sudo", "modprobe", "-a",
                       "nvme-%s" % self.transport,
                       "nvmet-%s" % self.transport])
        # null_block is only defined on Target; hasattr() guards Initiators.
        if self.mode == "kernel" and hasattr(self, "null_block") and self.null_block:
            self.exec_cmd(["sudo", "modprobe", "null_blk",
                           "nr_devices=%s" % self.null_block])

    def configure_adq(self):
        """Set up ADQ (SPDK mode only - not supported for kernel mode)."""
        if self.mode == "kernel":
            self.log.warning("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return
        self.adq_load_modules()
        self.adq_configure_nic()

    def adq_load_modules(self):
        """Best-effort modprobe of the traffic-control modules ADQ needs."""
        self.log.info("Modprobing ADQ-related Linux modules...")
        adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
        for module in adq_module_deps:
            try:
                self.exec_cmd(["sudo", "modprobe", module])
                self.log.info("%s loaded!" % module)
            except CalledProcessError as e:
                # Log and continue; a missing module will surface later when
                # the tc commands fail.
                self.log.error("ERROR: failed to load module %s" % module)
                self.log.error("%s resulted in error: %s" % (e.cmd, e.output))

    def adq_configure_tc(self):
        """Create ADQ traffic classes, ingress filters and XPS mapping per NIC."""
        self.log.info("Configuring ADQ Traffic classes and filters...")

        if self.mode == "kernel":
            self.log.warning("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return

        num_queues_tc0 = 2  # 2 is minimum number of queues for TC0
        # TC1 (the NVMe-oF traffic class) gets one queue per used core.
        num_queues_tc1 = self.num_cores
        # Target matches on destination port, initiator on source port.
        port_param = "dst_port" if isinstance(self, Target) else "src_port"
        port = "4420"
        xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")

        for nic_ip in self.nic_ips:
            nic_name = self.get_nic_name_by_ip(nic_ip)
            # Two TCs: TC0 (default) and TC1 (ADQ, hardware-offloaded).
            tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
                                "root", "mqprio", "num_tc", "2", "map", "0", "1",
                                "queues", "%s@0" % num_queues_tc0,
                                "%s@%s" % (num_queues_tc1, num_queues_tc0),
                                "hw", "1", "mode", "channel"]
            self.log.info(" ".join(tc_qdisc_map_cmd))
            self.exec_cmd(tc_qdisc_map_cmd)

            # Give the NIC time to apply the qdisc before adding ingress rules.
            time.sleep(5)
            tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
            self.log.info(" ".join(tc_qdisc_ingress_cmd))
            self.exec_cmd(tc_qdisc_ingress_cmd)

            # Steer NVMe-oF TCP traffic (port 4420) into hardware TC1.
            tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
                             "protocol", "ip", "ingress", "prio", "1", "flower",
                             "dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
                             "skip_sw", "hw_tc", "1"]
            self.log.info(" ".join(tc_filter_cmd))
            self.exec_cmd(tc_filter_cmd)

            # show tc configuration
            self.log.info("Show tc configuration for %s NIC..." % nic_name)
            tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
            tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
            self.log.info("%s" % tc_disk_out)
            self.log.info("%s" % tc_filter_out)

            # Ethtool coalesce settings must be applied after configuring traffic classes
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])

            # Pin transmit queues to the receive queues of the same channel.
            self.log.info("Running set_xps_rxqs script for %s NIC..." % nic_name)
            xps_cmd = ["sudo", xps_script_path, nic_name]
            self.log.info(xps_cmd)
            self.exec_cmd(xps_cmd)

    def reload_driver(self, driver):
        """rmmod + modprobe the given driver, logging (not raising) on failure."""
        try:
            self.exec_cmd(["sudo", "rmmod", driver])
            self.exec_cmd(["sudo", "modprobe", driver])
        except CalledProcessError as e:
            self.log.error("ERROR: failed to reload %s module!" % driver)
            self.log.error("%s resulted in error: %s" % (e.cmd, e.output))

    def adq_configure_nic(self):
        """Set per-port ethtool features required for ADQ on each test NIC."""
        self.log.info("Configuring NIC port settings for ADQ testing...")

        # Reload the driver first, to make sure any previous settings are re-set.
        self.reload_driver("ice")

        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            self.log.info(nic)
            try:
                self.exec_cmd(["sudo", "ethtool", "-K", nic,
                               "hw-tc-offload", "on"])  # Enable hardware TC offload
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-inline-flow-director", "on"])  # Enable Intel Flow Director
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"])  # Disable LLDP
                # As temporary workaround for ADQ, channel packet inspection optimization is turned on during connection establishment.
                # Then turned off before fio ramp_up expires in ethtool_after_fio_ramp().
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-pkt-inspect-optimize", "on"])
            except CalledProcessError as e:
                self.log.error("ERROR: failed to configure NIC port using ethtool!")
                self.log.error("%s resulted in error: %s" % (e.cmd, e.output))
                self.log.info("Please update your NIC driver and firmware versions and try again.")
                self.log.info(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
                self.log.info(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))

    def configure_services(self):
        """Stop services that disturb benchmarks, remembering prior state."""
        self.log.info("Configuring active services...")
        svc_config = configparser.ConfigParser(strict=False)

        # Below list is valid only for RHEL / Fedora systems and might not
        # contain valid names for other distributions.
        svc_target_state = {
            "firewalld": "inactive",
            "irqbalance": "inactive",
            "lldpad.service": "inactive",
            "lldpad.socket": "inactive"
        }

        for service in svc_target_state:
            # `systemctl show` output is INI-like key=value pairs; prepend a
            # section header so configparser can consume it.
            out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
            out = "\n".join(["[%s]" % service, out])
            svc_config.read_string(out)

            if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
                continue

            service_state = svc_config[service]["ActiveState"]
            self.log.info("Current state of %s service is %s" % (service, service_state))
            self.svc_restore_dict.update({service: service_state})
            if service_state != "inactive":
                self.log.info("Disabling %s. It will be restored after the test has finished." % service)
                self.exec_cmd(["sudo", "systemctl", "stop", service])

    def configure_sysctl(self):
        """Apply networking sysctl tuning, saving previous values for restore."""
        self.log.info("Tuning sysctl settings...")

        # busy_read polling is only beneficial (and tested) with SPDK + ADQ.
        busy_read = 0
        if self.enable_adq and self.mode == "spdk":
            busy_read = 1

        sysctl_opts = {
            "net.core.busy_poll": 0,
            "net.core.busy_read": busy_read,
            "net.core.somaxconn": 4096,
            "net.core.netdev_max_backlog": 8192,
            "net.ipv4.tcp_max_syn_backlog": 16384,
            "net.core.rmem_max": 268435456,
            "net.core.wmem_max": 268435456,
            "net.ipv4.tcp_mem": "268435456 268435456 268435456",
            "net.ipv4.tcp_rmem": "8192 1048576 33554432",
            "net.ipv4.tcp_wmem": "8192 1048576 33554432",
            "net.ipv4.route.flush": 1,
            "vm.overcommit_memory": 1,
        }

        for opt, value in sysctl_opts.items():
            # Record current value first so restore_sysctl() can revert it.
            self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
            self.log.info(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def configure_tuned(self):
        """Activate the configured tuned-adm profile, saving the current one."""
        if not self.tuned_profile:
            self.log.warning("WARNING: Tuned profile not set in configuration file. Skipping configuration.")
            return

        self.log.info("Configuring tuned-adm profile to %s." % self.tuned_profile)
        service = "tuned"
        tuned_config = configparser.ConfigParser(strict=False)

        out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
        out = "\n".join(["[%s]" % service, out])
        tuned_config.read_string(out)
        tuned_state = tuned_config[service]["ActiveState"]
        self.svc_restore_dict.update({service: tuned_state})

        if tuned_state != "inactive":
            profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
            profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()

            self.tuned_restore_dict = {
                "profile": profile,
                "mode": profile_mode
            }

        self.exec_cmd(["sudo", "systemctl", "start", service])
        self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
        self.log.info("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))

    def configure_cpu_governor(self):
        """Switch CPU frequency governor to "performance", saving previous."""
        self.log.info("Setting CPU governor to performance...")

        # This assumes that there is the same CPU scaling governor on each CPU
        self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])

    def configure_irq_affinity(self):
        """Run the vendor set_irq_affinity.sh script for each test NIC."""
        self.log.info("Setting NIC irq affinity for NICs...")

        irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            irq_cmd = ["sudo", irq_script_path, nic]
            self.log.info(irq_cmd)
            # The script expects to be executed from its own directory.
            self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)

    def restore_services(self):
        """Return systemd services to the states recorded before the test."""
        self.log.info("Restoring services...")
        for service, state in self.svc_restore_dict.items():
            cmd = "stop" if state == "inactive" else "start"
            self.exec_cmd(["sudo", "systemctl", cmd, service])

    def restore_sysctl(self):
        """Re-apply the sysctl values recorded by configure_sysctl()."""
        self.log.info("Restoring sysctl settings...")
        for opt, value in self.sysctl_restore_dict.items():
            self.log.info(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def restore_tuned(self):
        """Revert tuned-adm to the profile recorded by configure_tuned()."""
        self.log.info("Restoring tuned-adm settings...")

        if not self.tuned_restore_dict:
            return

        if self.tuned_restore_dict["mode"] == "auto":
            self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
            self.log.info("Reverted tuned-adm to auto_profile.")
        else:
            self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
            self.log.info("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])

    def restore_governor(self):
        """Revert the CPU governor recorded by configure_cpu_governor()."""
        self.log.info("Restoring CPU governor setting...")
        if self.governor_restore:
            self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
            self.log.info("Reverted CPU governor to %s." % self.governor_restore)

    def restore_settings(self):
        """Undo all tuning applied by configure_system() (and ADQ, if used)."""
        self.restore_governor()
        self.restore_tuned()
        self.restore_services()
        self.restore_sysctl()
        if self.enable_adq:
            # Reloading the driver clears ADQ tc/ethtool configuration.
            self.reload_driver("ice")
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
class Target(Server):
|
2021-02-01 12:31:52 +00:00
|
|
|
def __init__(self, name, general_config, target_config):
|
2022-04-03 05:52:50 +00:00
|
|
|
super().__init__(name, general_config, target_config)
|
2019-12-06 09:19:42 +00:00
|
|
|
|
2021-02-01 12:31:52 +00:00
|
|
|
# Defaults
|
2022-11-08 12:18:00 +00:00
|
|
|
self.enable_zcopy = False
|
|
|
|
self.scheduler_name = "static"
|
|
|
|
self.null_block = 0
|
|
|
|
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
|
|
|
|
self.subsystem_info_list = []
|
|
|
|
self.initiator_info = []
|
|
|
|
self.nvme_allowlist = []
|
|
|
|
self.nvme_blocklist = []
|
|
|
|
|
|
|
|
# Target-side measurement options
|
2022-11-18 07:40:43 +00:00
|
|
|
self.enable_pm = True
|
2022-11-08 12:53:39 +00:00
|
|
|
self.enable_sar = True
|
2022-11-08 14:21:22 +00:00
|
|
|
self.enable_pcm = True
|
2022-11-08 13:09:22 +00:00
|
|
|
self.enable_bw = True
|
2022-11-10 09:30:00 +00:00
|
|
|
self.enable_dpdk_memory = True
|
2020-01-10 12:40:16 +00:00
|
|
|
|
2021-02-01 12:31:52 +00:00
|
|
|
if "null_block_devices" in target_config:
|
|
|
|
self.null_block = target_config["null_block_devices"]
|
|
|
|
if "scheduler_settings" in target_config:
|
|
|
|
self.scheduler_name = target_config["scheduler_settings"]
|
|
|
|
if "zcopy_settings" in target_config:
|
|
|
|
self.enable_zcopy = target_config["zcopy_settings"]
|
2021-07-05 13:26:40 +00:00
|
|
|
if "results_dir" in target_config:
|
|
|
|
self.results_dir = target_config["results_dir"]
|
2022-10-04 12:11:56 +00:00
|
|
|
if "blocklist" in target_config:
|
|
|
|
self.nvme_blocklist = target_config["blocklist"]
|
|
|
|
if "allowlist" in target_config:
|
|
|
|
self.nvme_allowlist = target_config["allowlist"]
|
|
|
|
# Blocklist takes precedence, remove common elements from allowlist
|
|
|
|
self.nvme_allowlist = list(set(self.nvme_allowlist) - set(self.nvme_blocklist))
|
2022-11-18 07:40:43 +00:00
|
|
|
if "enable_pm" in target_config:
|
|
|
|
self.enable_pm = target_config["enable_pm"]
|
2022-11-08 12:53:39 +00:00
|
|
|
if "enable_sar" in target_config:
|
2022-12-02 08:09:24 +00:00
|
|
|
self.enable_sar = target_config["enable_sar"]
|
2022-11-08 14:21:22 +00:00
|
|
|
if "enable_pcm" in target_config:
|
|
|
|
self.enable_pcm = target_config["enable_pcm"]
|
2022-11-08 12:18:00 +00:00
|
|
|
if "enable_bandwidth" in target_config:
|
2022-11-08 13:09:22 +00:00
|
|
|
self.enable_bw = target_config["enable_bandwidth"]
|
2022-11-08 12:18:00 +00:00
|
|
|
if "enable_dpdk_memory" in target_config:
|
2022-11-10 09:30:00 +00:00
|
|
|
self.enable_dpdk_memory = target_config["enable_dpdk_memory"]
|
2022-10-04 12:11:56 +00:00
|
|
|
|
|
|
|
self.log.info("Items now on allowlist: %s" % self.nvme_allowlist)
|
|
|
|
self.log.info("Items now on blocklist: %s" % self.nvme_blocklist)
|
2020-11-26 12:58:41 +00:00
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
|
|
|
|
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
|
2021-02-02 11:13:43 +00:00
|
|
|
self.set_local_nic_info(self.set_local_nic_info_helper())
|
2021-03-08 14:31:05 +00:00
|
|
|
|
|
|
|
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
|
|
|
|
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
|
|
|
|
|
2021-02-04 11:47:39 +00:00
|
|
|
self.configure_system()
|
2021-02-03 12:33:10 +00:00
|
|
|
if self.enable_adq:
|
|
|
|
self.configure_adq()
|
2020-11-25 09:36:11 +00:00
|
|
|
self.sys_config()
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2021-02-02 11:13:43 +00:00
|
|
|
def set_local_nic_info_helper(self):
|
2021-02-03 12:08:15 +00:00
|
|
|
return json.loads(self.exec_cmd(["lshw", "-json"]))
|
|
|
|
|
2021-02-23 13:07:38 +00:00
|
|
|
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
|
2021-02-03 12:08:15 +00:00
|
|
|
stderr_opt = None
|
|
|
|
if stderr_redirect:
|
|
|
|
stderr_opt = subprocess.STDOUT
|
2021-02-23 13:07:38 +00:00
|
|
|
if change_dir:
|
|
|
|
old_cwd = os.getcwd()
|
|
|
|
os.chdir(change_dir)
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Changing directory to %s" % change_dir)
|
2021-02-23 13:07:38 +00:00
|
|
|
|
2021-02-03 12:08:15 +00:00
|
|
|
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
|
2021-02-23 13:07:38 +00:00
|
|
|
|
|
|
|
if change_dir:
|
|
|
|
os.chdir(old_cwd)
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Changing directory to %s" % old_cwd)
|
2021-02-03 12:08:15 +00:00
|
|
|
return out
|
2021-02-02 11:13:43 +00:00
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
def zip_spdk_sources(self, spdk_dir, dest_file):
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Zipping SPDK source directory")
|
2018-10-14 19:51:14 +00:00
|
|
|
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
|
2022-04-03 05:52:50 +00:00
|
|
|
for root, _directories, files in os.walk(spdk_dir, followlinks=True):
|
2018-10-14 19:51:14 +00:00
|
|
|
for file in files:
|
|
|
|
fh.write(os.path.relpath(os.path.join(root, file)))
|
|
|
|
fh.close()
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Done zipping")
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-05-18 09:58:23 +00:00
|
|
|
@staticmethod
|
|
|
|
def _chunks(input_list, chunks_no):
|
|
|
|
div, rem = divmod(len(input_list), chunks_no)
|
|
|
|
for i in range(chunks_no):
|
|
|
|
si = (div + 1) * (i if i < rem else rem) + div * (0 if i < rem else i - rem)
|
|
|
|
yield input_list[si:si + (div + 1 if i < rem else div)]
|
|
|
|
|
|
|
|
def spread_bdevs(self, req_disks):
|
|
|
|
# Spread available block devices indexes:
|
|
|
|
# - evenly across available initiator systems
|
|
|
|
# - evenly across available NIC interfaces for
|
|
|
|
# each initiator
|
|
|
|
# Not NUMA aware.
|
|
|
|
ip_bdev_map = []
|
|
|
|
initiator_chunks = self._chunks(range(0, req_disks), len(self.initiator_info))
|
|
|
|
|
|
|
|
for i, (init, init_chunk) in enumerate(zip(self.initiator_info, initiator_chunks)):
|
|
|
|
self.initiator_info[i]["bdev_range"] = init_chunk
|
|
|
|
init_chunks_list = list(self._chunks(init_chunk, len(init["target_nic_ips"])))
|
|
|
|
for ip, nic_chunk in zip(self.initiator_info[i]["target_nic_ips"], init_chunks_list):
|
|
|
|
for c in nic_chunk:
|
|
|
|
ip_bdev_map.append((ip, c))
|
|
|
|
return ip_bdev_map
|
|
|
|
|
2022-11-08 12:53:39 +00:00
|
|
|
def measure_sar(self, results_dir, sar_file_prefix, ramp_time, run_time):
|
2021-08-24 10:10:51 +00:00
|
|
|
cpu_number = os.cpu_count()
|
|
|
|
sar_idle_sum = 0
|
2022-05-05 10:24:40 +00:00
|
|
|
sar_output_file = os.path.join(results_dir, sar_file_prefix + ".txt")
|
|
|
|
sar_cpu_util_file = os.path.join(results_dir, ".".join([sar_file_prefix + "cpu_util", "txt"]))
|
2022-05-05 08:36:20 +00:00
|
|
|
|
2022-11-08 12:53:39 +00:00
|
|
|
self.log.info("Waiting %d seconds for ramp-up to finish before measuring SAR stats" % ramp_time)
|
|
|
|
time.sleep(ramp_time)
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Starting SAR measurements")
|
2022-05-05 08:36:20 +00:00
|
|
|
|
2022-11-08 12:53:39 +00:00
|
|
|
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % 1, "%s" % run_time])
|
2022-05-05 10:24:40 +00:00
|
|
|
with open(os.path.join(results_dir, sar_output_file), "w") as fh:
|
2018-10-14 19:51:14 +00:00
|
|
|
for line in out.split("\n"):
|
2021-08-24 10:10:51 +00:00
|
|
|
if "Average" in line:
|
|
|
|
if "CPU" in line:
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("Summary CPU utilization from SAR:")
|
|
|
|
self.log.info(line)
|
2021-08-24 10:10:51 +00:00
|
|
|
elif "all" in line:
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info(line)
|
2021-08-24 10:10:51 +00:00
|
|
|
else:
|
2021-10-12 13:25:59 +00:00
|
|
|
sar_idle_sum += float(line.split()[7])
|
2018-10-14 19:51:14 +00:00
|
|
|
fh.write(out)
|
2021-08-24 10:10:51 +00:00
|
|
|
sar_cpu_usage = cpu_number * 100 - sar_idle_sum
|
2022-05-05 10:24:40 +00:00
|
|
|
|
|
|
|
with open(os.path.join(results_dir, sar_cpu_util_file), "w") as f:
|
|
|
|
f.write("%0.2f" % sar_cpu_usage)
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-11-18 07:40:43 +00:00
|
|
|
def measure_power(self, results_dir, prefix, script_full_dir, ramp_time, run_time):
|
|
|
|
time.sleep(ramp_time)
|
2022-11-18 07:58:19 +00:00
|
|
|
self.log.info("Starting power measurements")
|
2022-09-22 14:58:25 +00:00
|
|
|
self.exec_cmd(["%s/../pm/collect-bmc-pm" % script_full_dir,
|
|
|
|
"-d", "%s" % results_dir, "-l", "-p", "%s" % prefix,
|
2022-11-18 07:40:43 +00:00
|
|
|
"-x", "-c", "%s" % run_time, "-t", "%s" % 1, "-r"])
|
2022-08-23 19:08:01 +00:00
|
|
|
|
2021-10-14 10:37:56 +00:00
|
|
|
def ethtool_after_fio_ramp(self, fio_ramp_time):
|
|
|
|
time.sleep(fio_ramp_time//2)
|
|
|
|
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
|
|
|
|
for nic in nic_names:
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info(nic)
|
2021-10-14 10:37:56 +00:00
|
|
|
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
|
|
|
|
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
|
|
|
|
|
2022-11-08 14:21:22 +00:00
|
|
|
def measure_pcm_memory(self, results_dir, pcm_file_name, ramp_time, run_time):
|
|
|
|
time.sleep(ramp_time)
|
|
|
|
cmd = ["pcm-memory", "1", "-csv=%s/%s" % (results_dir, pcm_file_name)]
|
2021-01-22 09:46:55 +00:00
|
|
|
pcm_memory = subprocess.Popen(cmd)
|
2022-11-08 14:21:22 +00:00
|
|
|
time.sleep(run_time)
|
2021-01-22 09:25:22 +00:00
|
|
|
pcm_memory.terminate()
|
2020-01-10 12:40:16 +00:00
|
|
|
|
2022-11-08 14:21:22 +00:00
|
|
|
def measure_pcm(self, results_dir, pcm_file_name, ramp_time, run_time):
|
|
|
|
time.sleep(ramp_time)
|
|
|
|
cmd = ["pcm", "1", "-i=%s" % run_time,
|
2022-06-30 08:07:49 +00:00
|
|
|
"-csv=%s/%s" % (results_dir, pcm_file_name)]
|
2021-01-22 09:46:55 +00:00
|
|
|
subprocess.run(cmd)
|
2020-02-18 14:13:00 +00:00
|
|
|
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
|
|
|
|
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
|
|
|
|
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
|
|
|
|
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
|
|
|
|
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
|
2020-01-10 12:40:16 +00:00
|
|
|
|
2022-11-08 14:21:22 +00:00
|
|
|
def measure_pcm_power(self, results_dir, pcm_power_file_name, ramp_time, run_time):
|
|
|
|
time.sleep(ramp_time)
|
|
|
|
out = self.exec_cmd(["pcm-power", "1", "-i=%s" % run_time])
|
2021-01-18 17:59:08 +00:00
|
|
|
with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
|
|
|
|
fh.write(out)
|
2022-11-08 14:21:22 +00:00
|
|
|
# TODO: Above command results in a .csv file containing measurements for all gathered samples.
|
|
|
|
# Improve this so that additional file containing measurements average is generated too.
|
2021-01-18 17:59:08 +00:00
|
|
|
|
2022-11-08 13:09:22 +00:00
|
|
|
def measure_network_bandwidth(self, results_dir, bandwidth_file_name, ramp_time, run_time):
|
|
|
|
self.log.info("Waiting %d seconds for ramp-up to finish before measuring bandwidth stats" % ramp_time)
|
|
|
|
time.sleep(ramp_time)
|
2022-09-27 14:44:20 +00:00
|
|
|
self.log.info("INFO: starting network bandwidth measure")
|
2021-03-03 13:06:13 +00:00
|
|
|
self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
|
2022-11-08 13:09:22 +00:00
|
|
|
"-a", "1", "-t", "1000", "-c", "%s" % run_time])
|
|
|
|
# TODO: Above command results in a .csv file containing measurements for all gathered samples.
|
|
|
|
# Improve this so that additional file containing measurements average is generated too.
|
|
|
|
# TODO: Monitor only these interfaces which are currently used to run the workload.
|
2020-07-03 08:01:23 +00:00
|
|
|
|
2022-11-10 09:30:00 +00:00
|
|
|
def measure_dpdk_memory(self, results_dir, dump_file_name, ramp_time):
    """Dump DPDK memory statistics from the running target via RPC.

    Waits through the ramp-up, asks the target (via env_dpdk RPC) to produce
    a memory-stats file, then moves that file into results_dir.
    """
    self.log.info("INFO: waiting to generate DPDK memory usage")
    time.sleep(ramp_time)
    self.log.info("INFO: generating DPDK memory usage")
    rpc_reply = rpc.env_dpdk.env_dpdk_get_mem_stats(self.client)
    tmp_dump_file = rpc_reply["filename"]
    os.rename(tmp_dump_file, "%s/%s" % (results_dir, dump_file_name))
|
2020-07-28 11:17:19 +00:00
|
|
|
|
2020-11-25 09:36:11 +00:00
|
|
|
def sys_config(self):
    """Log a snapshot of the local system configuration for the test report.

    Dumps kernel release, kernel command line, sysctl settings, CPU power
    info and the SPDK target settings visible on this object (zcopy,
    scheduler). Output goes to this object's logger only.
    """
    self.log.info("====Kernel release:====")
    self.log.info(os.uname().release)
    self.log.info("====Kernel command line:====")
    with open('/proc/cmdline') as f:
        cmdline = f.readlines()
    # get_uncommented_lines() filters the lines before they are logged
    self.log.info('\n'.join(self.get_uncommented_lines(cmdline)))
    self.log.info("====sysctl conf:====")
    with open('/etc/sysctl.conf') as f:
        sysctl = f.readlines()
    self.log.info('\n'.join(self.get_uncommented_lines(sysctl)))
    self.log.info("====Cpu power info:====")
    self.log.info(self.exec_cmd(["cpupower", "frequency-info"]))
    self.log.info("====zcopy settings:====")
    self.log.info("zcopy enabled: %s" % (self.enable_zcopy))
    self.log.info("====Scheduler settings:====")
    self.log.info("SPDK scheduler: %s" % (self.scheduler_name))
|
2020-11-25 09:36:11 +00:00
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
class Initiator(Server):
|
2021-02-01 12:31:52 +00:00
|
|
|
def __init__(self, name, general_config, initiator_config):
    """Set up an initiator host: read its config, open SSH and prepare SPDK.

    Beyond plain attribute setup this constructor has remote side effects:
    it connects over SSH, wipes/creates the remote SPDK work directory,
    optionally copies the SPDK sources over and runs system configuration.
    """
    super().__init__(name, general_config, initiator_config)

    # Required fields
    self.ip = initiator_config["ip"]
    self.target_nic_ips = initiator_config["target_nic_ips"]

    # Defaults
    self.cpus_allowed = None
    self.cpus_allowed_policy = "shared"
    self.spdk_dir = "/tmp/spdk"
    self.fio_bin = "/usr/src/fio/fio"
    self.nvmecli_bin = "nvme"
    self.cpu_frequency = None
    self.subsystem_info_list = []

    # Optional overrides from the per-initiator JSON config section
    if "spdk_dir" in initiator_config:
        self.spdk_dir = initiator_config["spdk_dir"]
    if "fio_bin" in initiator_config:
        self.fio_bin = initiator_config["fio_bin"]
    if "nvmecli_bin" in initiator_config:
        self.nvmecli_bin = initiator_config["nvmecli_bin"]
    if "cpus_allowed" in initiator_config:
        self.cpus_allowed = initiator_config["cpus_allowed"]
    if "cpus_allowed_policy" in initiator_config:
        self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
    if "cpu_frequency" in initiator_config:
        self.cpu_frequency = initiator_config["cpu_frequency"]
    # Environment variable wins over both the default and the config file
    if os.getenv('SPDK_WORKSPACE'):
        self.spdk_dir = os.getenv('SPDK_WORKSPACE')

    # Open the SSH session used by exec_cmd()/put_file()/get_file()
    self.ssh_connection = paramiko.SSHClient()
    self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
    # Start from a clean remote results dir
    self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
    self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
    self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))

    # Copy SPDK sources unless the config explicitly skips installation
    if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
        self.copy_spdk("/tmp/spdk.zip")
    self.set_local_nic_info(self.set_local_nic_info_helper())
    self.set_cpu_frequency()
    self.configure_system()
    if self.enable_adq:
        self.configure_adq()
    self.sys_config()
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2021-02-02 11:13:43 +00:00
|
|
|
def set_local_nic_info_helper(self):
    """Return the initiator's hardware inventory as parsed lshw JSON."""
    lshw_output = self.exec_cmd(["lshw", "-json"])
    return json.loads(lshw_output)
|
2021-02-02 11:13:43 +00:00
|
|
|
|
2022-01-18 09:33:06 +00:00
|
|
|
def stop(self):
    """Tear down the initiator: undo system tweaks and close the SSH session."""
    self.restore_settings()
    self.ssh_connection.close()
|
|
|
|
|
2021-02-23 13:07:38 +00:00
|
|
|
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
    """Run a command on the remote initiator over the SSH connection.

    :param cmd: command and arguments as a list of strings
    :param stderr_redirect: if True, merge stderr into stdout by allocating
        a PTY for the remote command
    :param change_dir: if set, cd into this directory before running cmd
    :return: decoded (utf-8) stdout of the remote command
    :raises CalledProcessError: if the remote command exits non-zero,
        mirroring subprocess semantics for callers
    """
    if change_dir:
        cmd = ["cd", change_dir, ";", *cmd]

    # In case one of the command elements contains whitespace and is not
    # already quoted (e.g. when calling sysctl), quote it again to prevent
    # word-splitting/expansion by the remote shell.
    for i, c in enumerate(cmd):
        if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
            cmd[i] = '"%s"' % c
    cmd = " ".join(cmd)

    # Redirect stderr to stdout by requesting a PTY if needed
    _, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
    out = stdout.read().decode(encoding="utf-8")

    # Check the return code
    rc = stdout.channel.recv_exit_status()
    if rc:
        raise CalledProcessError(int(rc), cmd, out)

    return out
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
def put_file(self, local, remote_dest):
    """Upload a local file to remote_dest on the initiator over SFTP.

    The SFTP channel is closed in a finally block so a failed transfer
    does not leak paramiko channels over a long test campaign.
    """
    ftp = self.ssh_connection.open_sftp()
    try:
        ftp.put(local, remote_dest)
    finally:
        ftp.close()
|
|
|
|
|
|
|
|
def get_file(self, remote, local_dest):
    """Download a remote file from the initiator to local_dest over SFTP.

    The SFTP channel is closed in a finally block so a failed transfer
    does not leak paramiko channels over a long test campaign.
    """
    ftp = self.ssh_connection.open_sftp()
    try:
        ftp.get(remote, local_dest)
    finally:
        ftp.close()
|
|
|
|
|
2021-03-08 14:31:05 +00:00
|
|
|
def copy_spdk(self, local_spdk_zip):
    """Ship the zipped SPDK sources to the initiator and unpack them there."""
    remote_zip = "/tmp/spdk_drop.zip"
    self.log.info("Copying SPDK sources to initiator %s" % self.name)
    self.put_file(local_spdk_zip, remote_zip)
    self.log.info("Copied sources zip from target")
    self.exec_cmd(["unzip", "-qo", remote_zip, "-d", self.spdk_dir])
    self.log.info("Sources unpacked")
|
2021-03-08 14:31:05 +00:00
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
def copy_result_files(self, dest_dir):
    """Copy all result files from the initiator's nvmf_perf dir into dest_dir.

    dest_dir is created (including parents) if missing. Robustness fix:
    os.makedirs(..., exist_ok=True) replaces the exists()/mkdir() pair,
    which failed for nested paths and raced with concurrent creation.
    """
    self.log.info("Copying results")

    os.makedirs(dest_dir, exist_ok=True)

    # Get list of result files from initiator and copy them back to target
    file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")

    for result_file in file_list:
        self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", result_file),
                      os.path.join(dest_dir, result_file))
    self.log.info("Done copying results")
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-09-28 11:14:52 +00:00
|
|
|
def match_subsystems(self, target_subsytems):
    """Keep only target subsystems reachable via this initiator's NICs.

    Each entry is a (port, nqn, ip) tuple; entries whose ip is not in
    self.target_nic_ips are dropped, the rest are sorted by nqn and
    stored in self.subsystem_info_list.
    """
    matched = [entry for entry in target_subsytems if entry[2] in self.target_nic_ips]
    matched.sort(key=lambda entry: entry[1])
    self.log.info("Found matching subsystems on target side:")
    for subsystem in matched:
        self.log.info(subsystem)
    self.subsystem_info_list = matched
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2020-08-24 16:22:15 +00:00
|
|
|
def gen_fio_filename_conf(self, *args, **kwargs):
    """Placeholder for generating the fio filename sections.

    Intentionally a no-op here; the logic is implemented in the
    SPDKInitiator and KernelInitiator subclasses.
    """
    # Logic implemented in SPDKInitiator and KernelInitiator classes
    pass
|
|
|
|
|
2022-06-30 16:45:58 +00:00
|
|
|
def get_route_nic_numa(self, remote_nvme_ip):
    """Return the NUMA node of the local NIC routing to remote_nvme_ip."""
    route_info = json.loads(self.exec_cmd(["ip", "-j", "route", "get", remote_nvme_ip]))
    nic_name = route_info[0]["dev"]
    return self.get_nic_numa_node(nic_name)
|
|
|
|
|
2022-06-30 17:04:44 +00:00
|
|
|
@staticmethod
def gen_fio_offset_section(offset_inc, num_jobs):
    """Build the fio size/offset/offset_increment lines for one job section.

    When offset_inc is 0 the workload space is split evenly between the
    num_jobs fio jobs (100 // num_jobs percent each).
    """
    if offset_inc == 0:
        offset_inc = 100 // num_jobs
    section_lines = ["size=%s%%" % offset_inc,
                     "offset=0%",
                     "offset_increment=%s%%" % offset_inc]
    return "\n".join(section_lines)
|
|
|
|
|
|
|
|
def gen_fio_numa_section(self, fio_filenames_list):
    """Build fio NUMA pinning lines for a chunk of device filenames.

    Counts how many devices in the chunk live on each NUMA node and pins
    both CPU and memory policy of the job section to the most common one.
    """
    node_counts = {}
    for dev_path in fio_filenames_list:
        node = self.get_nvme_subsystem_numa(os.path.basename(dev_path))
        node_counts[node] = node_counts.get(node, 0) + 1

    # Use the most common NUMA node for this chunk to allocate memory and CPUs
    preferred_node = max(node_counts.items(), key=lambda kv: kv[1])[0]

    return "\n".join(["numa_cpu_nodes=%s" % preferred_node,
                      "numa_mem_policy=prefer:%s" % preferred_node])
|
2022-06-30 17:04:44 +00:00
|
|
|
|
|
|
|
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no,
                   num_jobs=None, ramp_time=0, run_time=10, rate_iops=0,
                   offset=False, offset_inc=0):
    """Generate a fio job file on the initiator and return its remote path.

    Builds the [global] section from the template, appends engine-specific
    options and the per-subclass filename sections, then writes the file
    into <spdk_dir>/nvmf_perf on the initiator.

    BUGFIX: counting CPUs from a cpus_allowed range such as "0-3" used
    len(range(a, b)), which undercounted by one (a range is inclusive on
    both ends; sibling get_num_cores() counts it as len(range(a, b)) + 1).
    """
    fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999

norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
    if "spdk" in self.mode:
        ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
        spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
    else:
        ioengine = self.ioengine
        spdk_conf = ""
        # Kernel mode: enumerate connected NVMe-oF devices on the initiator
        out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
                             "|", "awk", "'{print $1}'"])
        subsystems = [x for x in out.split("\n") if "nvme" in x]

    if self.cpus_allowed is not None:
        self.log.info("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
        cpus_num = 0
        cpus = self.cpus_allowed.split(",")
        for cpu in cpus:
            if "-" in cpu:
                a, b = cpu.split("-")
                a = int(a)
                b = int(b)
                # Inclusive range: "0-3" means 4 CPUs (fixed off-by-one)
                cpus_num += len(range(a, b)) + 1
            else:
                cpus_num += 1
        self.num_cores = cpus_num
        threads = range(0, self.num_cores)
    elif hasattr(self, 'num_cores'):
        self.log.info("Limiting FIO workload execution to %s cores" % self.num_cores)
        threads = range(0, int(self.num_cores))
    else:
        self.num_cores = len(subsystems)
        threads = range(0, len(subsystems))

    # Filename sections are generated by the SPDK/Kernel initiator subclasses
    if "spdk" in self.mode:
        filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs,
                                                      offset, offset_inc)
    else:
        filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs,
                                                      offset, offset_inc)

    fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                          rw=rw, rwmixread=rwmixread, block_size=block_size,
                                          ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)

    # TODO: hipri disabled for now, as it causes fio errors:
    # io_u error on file /dev/nvme2n1: Operation not supported
    # See comment in KernelInitiator class, init_connect() function
    if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
        fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
    if num_jobs:
        fio_config = fio_config + "numjobs=%s \n" % num_jobs
    if self.cpus_allowed is not None:
        fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
        fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
    fio_config = fio_config + filename_section

    fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
    if hasattr(self, "num_cores"):
        fio_config_filename += "_%sCPU" % self.num_cores
    fio_config_filename += ".fio"

    # Write the job file on the remote initiator
    self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
    self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
    self.log.info("Created FIO Config:")
    self.log.info(fio_config)

    return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
|
|
|
|
|
2020-03-03 13:38:56 +00:00
|
|
|
def set_cpu_frequency(self):
    """Pin the initiator CPUs to the configured fixed frequency.

    Switches the cpufreq governor to userspace and sets the requested
    frequency; exits the script if that fails (e.g. intel_pstate active).
    Does nothing but warn when no cpu_frequency was configured.
    """
    if self.cpu_frequency is None:
        self.log.warning("WARNING: you have disabled intel_pstate and using default cpu governance.")
        return
    try:
        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
        self.log.info(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
    except Exception:
        self.log.error("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
        sys.exit()
|
2020-03-03 13:38:56 +00:00
|
|
|
|
2022-11-09 12:21:33 +00:00
|
|
|
def run_fio(self, fio_config_file, run_num=1):
    """Execute one fio run on the initiator and log its JSON output.

    The JSON result is written next to the config file on the initiator,
    named after the job, run number and initiator name. A failing fio
    process is logged as an error but does not raise.
    """
    job_name, _ = os.path.splitext(fio_config_file)
    self.log.info("Starting FIO run for job: %s" % job_name)
    self.log.info("Using FIO: %s" % self.fio_bin)

    output_filename = "%s_run_%s_%s.json" % (job_name, run_num, self.name)
    fio_cmd = ["sudo", self.fio_bin, fio_config_file, "--output-format=json",
               "--output=%s" % output_filename, "--eta=never"]
    try:
        output = self.exec_cmd(fio_cmd, True)
        self.log.info(output)
        self.log.info("FIO run finished. Results in: %s" % output_filename)
    except subprocess.CalledProcessError as e:
        self.log.error("ERROR: Fio process failed!")
        self.log.error(e.stdout)
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2020-11-25 09:36:11 +00:00
|
|
|
def sys_config(self):
    """Log the initiator's system configuration via remote commands.

    Remote counterpart of Target.sys_config(): runs uname/cat/cpupower
    over SSH instead of reading local files.
    """
    self.log.info("====Kernel release:====")
    self.log.info(self.exec_cmd(["uname", "-r"]))
    self.log.info("====Kernel command line:====")
    cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
    self.log.info('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
    self.log.info("====sysctl conf:====")
    sysctl = self.exec_cmd(["sudo", "cat", "/etc/sysctl.conf"])
    self.log.info('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
    self.log.info("====Cpu power info:====")
    self.log.info(self.exec_cmd(["cpupower", "frequency-info"]))
|
2020-11-25 09:36:11 +00:00
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
class KernelTarget(Target):
|
2021-02-01 12:31:52 +00:00
|
|
|
def __init__(self, name, general_config, target_config):
    """Configure a kernel-mode NVMe-oF target host.

    Adds the nvmetcli binary location on top of the common Target setup;
    the config key "nvmet_bin" overrides the default.
    """
    super().__init__(name, general_config, target_config)
    # Defaults
    self.nvmet_bin = target_config.get("nvmet_bin", "nvmetcli")
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-01-18 09:33:06 +00:00
|
|
|
def stop(self):
    """Tear down the kernel target: clear the nvmet config, restore settings."""
    self.nvmet_command(self.nvmet_bin, "clear")
    self.restore_settings()
|
2022-09-28 13:54:30 +00:00
|
|
|
|
2022-10-04 12:11:56 +00:00
|
|
|
def get_nvme_device_bdf(self, nvme_dev_path):
    """Translate a /dev/nvmeX path into its PCI BDF address via sysfs."""
    dev_name = os.path.basename(nvme_dev_path)
    sysfs_address = self.exec_cmd(["cat", "/sys/block/%s/device/address" % dev_name])
    return sysfs_address.strip()
|
|
|
|
|
2022-10-04 10:39:29 +00:00
|
|
|
def get_nvme_devices(self):
    """Return block-device paths of local NVMe drives, honoring the
    nvme_blocklist / nvme_allowlist settings.

    BUGFIX: this previously returned the raw, unfiltered lsblk device
    list (dev_list) instead of the filtered nvme_list, so the allow/block
    lists and the "nvme" filter were silently ignored.
    """
    dev_list = self.exec_cmd(["lsblk", "-o", "NAME", "-nlpd"]).split("\n")
    nvme_list = []
    for dev in dev_list:
        if "nvme" not in dev:
            continue
        if self.get_nvme_device_bdf(dev) in self.nvme_blocklist:
            continue
        # Empty allowlist means "allow everything not blocklisted"
        if len(self.nvme_allowlist) == 0:
            nvme_list.append(dev)
            continue
        if self.get_nvme_device_bdf(dev) in self.nvme_allowlist:
            nvme_list.append(dev)
    return nvme_list
|
2022-10-04 10:39:29 +00:00
|
|
|
|
2022-09-28 13:54:30 +00:00
|
|
|
def nvmet_command(self, nvmet_bin, command):
    """Run a single nvmetcli subcommand and return its output."""
    nvmet_cmd = [nvmet_bin, *command.split(" ")]
    return self.exec_cmd(nvmet_cmd)
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-05-18 09:58:23 +00:00
|
|
|
def kernel_tgt_gen_subsystem_conf(self, nvme_list):
    """Generate a nvmetcli JSON config ("kernel.conf") for the given drives.

    For every device in nvme_list a subsystem with one namespace and a
    dedicated listener port (4420 + index, spread over the target NICs by
    spread_bdevs) is created. Also records (port, nqn, ip) tuples in
    self.subsystem_info_list for the initiators to match against.
    """
    nvmet_cfg = {
        "ports": [],
        "hosts": [],
        "subsystems": [],
    }

    for ip, bdev_num in self.spread_bdevs(len(nvme_list)):
        port = str(4420 + bdev_num)
        nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
        serial = "SPDK00%s" % bdev_num
        bdev_name = nvme_list[bdev_num]

        nvmet_cfg["subsystems"].append({
            "allowed_hosts": [],
            "attr": {
                "allow_any_host": "1",
                "serial": serial,
                "version": "1.3"
            },
            "namespaces": [
                {
                    "device": {
                        "path": bdev_name,
                        "uuid": "%s" % uuid.uuid4()
                    },
                    "enable": 1,
                    # NOTE(review): nsid is set to the port number string
                    # rather than a small per-subsystem index — looks
                    # intentional (unique per namespace) but confirm.
                    "nsid": port
                }
            ],
            "nqn": nqn
        })

        nvmet_cfg["ports"].append({
            "addr": {
                "adrfam": "ipv4",
                "traddr": ip,
                "trsvcid": port,
                "trtype": self.transport
            },
            "portid": bdev_num,
            "referrals": [],
            "subsystems": [nqn]
        })

        self.subsystem_info_list.append((port, nqn, ip))
    self.subsys_no = len(self.subsystem_info_list)

    # nvmetcli later restores the target state from this file
    with open("kernel.conf", "w") as fh:
        fh.write(json.dumps(nvmet_cfg, indent=2))
|
|
|
|
|
|
|
|
def tgt_start(self):
    """Configure and start the kernel NVMe-oF target.

    Picks null-block or real NVMe devices, generates kernel.conf, then
    clears and restores the nvmet state from it. Optionally sets up ADQ
    traffic classes.
    """
    self.log.info("Configuring kernel NVMeOF Target")

    if self.null_block:
        self.log.info("Configuring with null block device.")
        nvme_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
    else:
        self.log.info("Configuring with NVMe drives.")
        nvme_list = self.get_nvme_devices()

    self.kernel_tgt_gen_subsystem_conf(nvme_list)
    # NOTE(review): kernel_tgt_gen_subsystem_conf() already sets subsys_no;
    # this overwrite with len(nvme_list) is redundant at best — confirm.
    self.subsys_no = len(nvme_list)

    self.nvmet_command(self.nvmet_bin, "clear")
    self.nvmet_command(self.nvmet_bin, "restore kernel.conf")

    if self.enable_adq:
        self.adq_configure_tc()

    self.log.info("Done configuring kernel NVMeOF Target")
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
|
|
|
|
class SPDKTarget(Target):
|
2021-02-01 12:31:52 +00:00
|
|
|
def __init__(self, name, general_config, target_config):
    """Configure an SPDK (userspace) NVMe-oF target host.

    Reads the mandatory core_mask plus a set of optional tuning knobs
    (shared buffers, queue depth, DIF, BPF traces, DSA, scheduler limit)
    from target_config on top of the common Target setup.
    """
    super().__init__(name, general_config, target_config)

    # Required fields
    self.core_mask = target_config["core_mask"]
    self.num_cores = self.get_num_cores(self.core_mask)

    # Defaults
    self.dif_insert_strip = False
    self.null_block_dif_type = 0
    self.num_shared_buffers = 4096
    self.max_queue_depth = 128
    self.bpf_proc = None
    self.bpf_scripts = []
    self.enable_dsa = False
    self.scheduler_core_limit = None

    # Optional overrides from the target JSON config section
    if "num_shared_buffers" in target_config:
        self.num_shared_buffers = target_config["num_shared_buffers"]
    if "max_queue_depth" in target_config:
        self.max_queue_depth = target_config["max_queue_depth"]
    if "null_block_dif_type" in target_config:
        self.null_block_dif_type = target_config["null_block_dif_type"]
    if "dif_insert_strip" in target_config:
        self.dif_insert_strip = target_config["dif_insert_strip"]
    if "bpf_scripts" in target_config:
        self.bpf_scripts = target_config["bpf_scripts"]
    if "dsa_settings" in target_config:
        self.enable_dsa = target_config["dsa_settings"]
    if "scheduler_core_limit" in target_config:
        self.scheduler_core_limit = target_config["scheduler_core_limit"]

    self.log.info("====DSA settings:====")
    self.log.info("DSA enabled: %s" % (self.enable_dsa))
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2022-10-04 11:54:27 +00:00
|
|
|
def get_nvme_devices_count(self):
    """Number of NVMe devices usable by the target after list filtering."""
    devices = self.get_nvme_devices()
    return len(devices)
|
|
|
|
|
|
|
|
def get_nvme_devices(self):
    """Return PCI BDFs of local NVMe controllers, honoring the
    nvme_blocklist / nvme_allowlist settings.

    Consistency fix: a `continue` is added after the empty-allowlist
    append, mirroring KernelTarget.get_nvme_devices() and making the
    branch structure robust to future edits (behavior is unchanged —
    the second check could never double-append).
    """
    bdev_subsys_json_obj = json.loads(self.exec_cmd([os.path.join(self.spdk_dir, "scripts/gen_nvme.sh")]))
    bdev_bdfs = []
    for bdev in bdev_subsys_json_obj["config"]:
        bdev_traddr = bdev["params"]["traddr"]
        if bdev_traddr in self.nvme_blocklist:
            continue
        # Empty allowlist means "allow everything not blocklisted"
        if len(self.nvme_allowlist) == 0:
            bdev_bdfs.append(bdev_traddr)
            continue
        if bdev_traddr in self.nvme_allowlist:
            bdev_bdfs.append(bdev_traddr)
    return bdev_bdfs
|
|
|
|
|
2022-04-03 06:13:29 +00:00
|
|
|
@staticmethod
def get_num_cores(core_mask):
    """Count the CPU cores described by an SPDK core mask.

    Accepts either a hex bitmask string ("0xFF") or a core-list string
    ("[0-3,8,9]"); ranges in the list form are inclusive on both ends.
    """
    if "0x" in core_mask:
        # Hex bitmask: one core per set bit
        return bin(int(core_mask, 16)).count("1")

    total = 0
    stripped = core_mask.replace("[", "").replace("]", "")
    for chunk in stripped.split(","):
        if "-" in chunk:
            first, last = chunk.split("-")
            # Inclusive range, e.g. "0-3" describes 4 cores
            total += len(range(int(first), int(last))) + 1
        else:
            total += 1
    return total
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
def spdk_tgt_configure(self):
    """Configure the running SPDK target over JSON-RPC.

    Sets up ADQ (if enabled), creates the NVMe-oF transport layer with the
    configured tuning knobs, then adds bdevs (null-block or NVMe) and the
    subsystem/listener configuration.
    """
    self.log.info("Configuring SPDK NVMeOF target via RPC")

    if self.enable_adq:
        self.adq_configure_tc()

    # Create transport layer
    rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
                                   num_shared_buffers=self.num_shared_buffers,
                                   max_queue_depth=self.max_queue_depth,
                                   dif_insert_or_strip=self.dif_insert_strip,
                                   sock_priority=self.adq_priority)
    self.log.info("SPDK NVMeOF transport layer:")
    rpc_client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))

    if self.null_block:
        self.spdk_tgt_add_nullblock(self.null_block)
        self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
    else:
        self.spdk_tgt_add_nvme_conf()
        self.spdk_tgt_add_subsystem_conf(self.nic_ips)

    self.log.info("Done configuring SPDK NVMeOF Target")
|
2021-09-30 17:13:38 +00:00
|
|
|
|
2020-08-21 16:47:56 +00:00
|
|
|
def spdk_tgt_add_nullblock(self, null_block_count):
    """Create the requested number of null-block bdevs via RPC.

    Bdevs are named NvmeXn1 to match the naming of real NVMe bdevs. When
    DIF protection is configured, 128 bytes of metadata are added to the
    4096-byte block size.
    """
    md_size = 128 if self.null_block_dif_type != 0 else 0
    block_size = 4096

    self.log.info("Adding null block bdevices to config via RPC")
    for bdev_idx in range(null_block_count):
        self.log.info("Setting bdev protection to :%s" % self.null_block_dif_type)
        rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size,
                                  "Nvme{}n1".format(bdev_idx),
                                  dif_type=self.null_block_dif_type, md_size=md_size)
    self.log.info("SPDK Bdevs configuration:")
    rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
    """Attach local NVMe controllers as SPDK bdevs via RPC.

    Optionally limits the attached controllers to req_num_disks; exits
    the script if more disks are requested than are available.
    """
    self.log.info("Adding NVMe bdevs to config via RPC")

    # RPC expects dotted BDF notation (0000.01.00.0)
    bdfs = [bdf.replace(":", ".") for bdf in self.get_nvme_devices()]

    if req_num_disks:
        if req_num_disks > len(bdfs):
            self.log.error("ERROR: Requested number of disks is more than available %s" % len(bdfs))
            sys.exit(1)
        bdfs = bdfs[0:req_num_disks]

    for bdev_idx, bdf in enumerate(bdfs):
        rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % bdev_idx,
                                             trtype="PCIe", traddr=bdf)

    self.log.info("SPDK Bdevs configuration:")
    rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
    """Create NVMe-oF subsystems and listeners for the configured bdevs.

    One subsystem per bdev; each gets a dedicated port (4420 + index)
    spread over the target NICs by spread_bdevs. Records (port, nqn, ip)
    tuples in self.subsystem_info_list for the initiators.
    """
    self.log.info("Adding subsystems to config")
    if not req_num_disks:
        req_num_disks = self.get_nvme_devices_count()

    for ip, bdev_num in self.spread_bdevs(req_num_disks):
        port = str(4420 + bdev_num)
        nqn = "nqn.2018-09.io.spdk:cnode%s" % bdev_num
        serial = "SPDK00%s" % bdev_num
        bdev_name = "Nvme%sn1" % bdev_num

        rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
                                       allow_any_host=True, max_namespaces=8)
        rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
        # Listen on both the subsystem NQN and the discovery service
        for nqn_name in [nqn, "discovery"]:
            rpc.nvmf.nvmf_subsystem_add_listener(self.client,
                                                 nqn=nqn_name,
                                                 trtype=self.transport,
                                                 traddr=ip,
                                                 trsvcid=port,
                                                 adrfam="ipv4")
        self.subsystem_info_list.append((port, nqn, ip))
    self.subsys_no = len(self.subsystem_info_list)

    self.log.info("SPDK NVMeOF subsystem configuration:")
    rpc_client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
|
2018-10-14 19:51:14 +00:00
|
|
|
|
2021-07-05 13:26:40 +00:00
|
|
|
def bpf_start(self):
    """Launch the configured bpftrace scripts against the nvmf_tgt process.

    Reads the target PID from the pid file written by tgt_start() and
    starts scripts/bpftrace.sh as a background process; trace output goes
    to bpf_traces.txt in the results directory.
    """
    self.log.info("Starting BPF Trace scripts: %s" % self.bpf_scripts)
    bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
    bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
    results_path = os.path.join(self.results_dir, "bpf_traces.txt")

    with open(self.pid, "r") as fh:
        nvmf_pid = str(fh.readline())

    cmd = [bpf_script, nvmf_pid, *bpf_traces]
    self.log.info(cmd)
    # NOTE(review): env={...} REPLACES the child's entire environment
    # (no PATH etc.) — bpftrace.sh apparently copes, but confirm before
    # relying on external tools inside that script.
    self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
def tgt_start(self):
    """Start the nvmf_tgt application and configure it over RPC.

    Launches nvmf_tgt with --wait-for-rpc, records its PID, waits for the
    RPC socket, applies socket/scheduler/accel options, finishes framework
    init, optionally starts BPF traces, then configures the NVMe-oF target.
    """
    if self.null_block:
        self.subsys_no = 1
    else:
        self.subsys_no = self.get_nvme_devices_count()
    self.log.info("Starting SPDK NVMeOF Target process")
    nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
    proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
    self.pid = os.path.join(self.spdk_dir, "nvmf.pid")

    # PID file is consumed later by bpf_start()
    with open(self.pid, "w") as fh:
        fh.write(str(proc.pid))
    self.nvmf_proc = proc
    self.log.info("SPDK NVMeOF Target PID=%s" % self.pid)
    self.log.info("Waiting for spdk to initialize...")
    # NOTE(review): busy-waits forever if nvmf_tgt never creates its RPC
    # socket — consider a timeout.
    while True:
        if os.path.exists("/var/tmp/spdk.sock"):
            break
        time.sleep(1)
    self.client = rpc_client.JSONRPCClient("/var/tmp/spdk.sock")

    rpc.sock.sock_set_default_impl(self.client, impl_name="posix")

    # Socket options must be set before framework_start_init()
    if self.enable_zcopy:
        rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
                                       enable_zerocopy_send_server=True)
        self.log.info("Target socket options:")
        rpc_client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))

    if self.enable_adq:
        rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
        rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
                                       nvme_adminq_poll_period_us=100000, retry_count=4)

    if self.enable_dsa:
        rpc.dsa.dsa_scan_accel_module(self.client, config_kernel_mode=None)
        self.log.info("Target DSA accel module enabled")

    rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name, core_limit=self.scheduler_core_limit)
    rpc.framework_start_init(self.client)

    if self.bpf_scripts:
        self.bpf_start()

    self.spdk_tgt_configure()
|
|
|
|
|
2022-01-18 09:33:06 +00:00
|
|
|
def stop(self):
    """Tear down the SPDK NVMeOF target.

    Stops the BPF trace helper (if one was started), terminates the SPDK
    target process (escalating to SIGKILL on failure and cleaning up any
    leftover RPC socket files), and finally restores the host settings
    changed during setup.
    """
    if self.bpf_proc:
        self.log.info("Stopping BPF Trace script")
        self.bpf_proc.terminate()
        self.bpf_proc.wait()

    # tgt_start() may have failed before the process attribute was set,
    # so only attempt termination when it exists.
    target_proc = getattr(self, "nvmf_proc", None)
    if target_proc is not None:
        try:
            target_proc.terminate()
            target_proc.wait(timeout=30)
        except Exception as e:
            # Graceful shutdown failed or timed out - force-kill the process.
            self.log.info("Failed to terminate SPDK Target process. Sending SIGKILL.")
            self.log.info(e)
            target_proc.kill()
            target_proc.communicate()
            # Try to clean up RPC socket files if they were not removed
            # because of using 'kill'
            try:
                os.remove("/var/tmp/spdk.sock")
                os.remove("/var/tmp/spdk.sock.lock")
            except FileNotFoundError:
                pass
    self.restore_settings()
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
|
|
|
|
class KernelInitiator(Initiator):
    """Initiator host that connects to NVMeOF subsystems with the Linux
    kernel NVMe driver (via nvme-cli) and runs fio against /dev/nvmeXnY
    block devices.
    """

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Defaults
        self.extra_params = ""
        self.ioengine = "libaio"

        if "num_cores" in initiator_config:
            self.num_cores = initiator_config["num_cores"]

        if "extra_params" in initiator_config:
            self.extra_params = initiator_config["extra_params"]

        if "kernel_engine" in initiator_config:
            self.ioengine = initiator_config["kernel_engine"]
            if "io_uring" in self.ioengine:
                # NOTE(review): this overwrites (not appends to) any
                # user-provided "extra_params" when io_uring is selected --
                # confirm this is intentional.
                self.extra_params = "--nr-poll-queues=8"

    def get_connected_nvme_list(self):
        """Return the device names (e.g. "nvme0n1") of connected NVMe devices
        whose model string identifies them as SPDK or Linux NVMeOF targets.
        """
        json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
        nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
                     if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
        return nvme_list

    def init_connect(self):
        """Connect to all discovered subsystems with nvme-cli and, for
        io_uring, tune block-layer sysfs settings on the resulting devices.
        """
        self.log.info("Below connection attempts may result in error messages, this is expected!")
        for subsystem in self.subsystem_info_list:
            # subsystem is a (port, nqn, address) 3-tuple; %-formatting a
            # tuple expands it into the three placeholders.
            self.log.info("Trying to connect %s %s %s" % subsystem)
            self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
                           "-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
            # Give the kernel a moment to create the block device.
            time.sleep(2)

        if "io_uring" in self.ioengine:
            self.log.info("Setting block layer settings for io_uring.")

            # TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
            # apparently it's not possible for connected subsystems.
            # Results in "error: Invalid argument"
            block_sysfs_settings = {
                "iostats": "0",
                "rq_affinity": "0",
                "nomerges": "2"
            }

            for disk in self.get_connected_nvme_list():
                sysfs = os.path.join("/sys/block", disk, "queue")
                for k, v in block_sysfs_settings.items():
                    sysfs_opt_path = os.path.join(sysfs, k)
                    try:
                        self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
                    except CalledProcessError as e:
                        # Best effort: some sysfs knobs may be read-only or
                        # missing on a given kernel; warn and continue.
                        self.log.warning("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
                    finally:
                        # Always log the effective value, whether or not the
                        # write succeeded.
                        _ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
                        self.log.info("%s=%s" % (sysfs_opt_path, _))

    def init_disconnect(self):
        """Disconnect from every subsystem connected by init_connect()."""
        for subsystem in self.subsystem_info_list:
            self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
            time.sleep(1)

    def get_nvme_subsystem_numa(self, dev_name):
        """Return the NUMA node of the NIC used to reach the target that
        backs NVMe device *dev_name*.
        """
        # Remove two last characters to get controller name instead of subsystem name
        nvme_ctrl = os.path.basename(dev_name)[:-2]
        # NOTE(review): re.search() returns None if no IPv4 address is found
        # in the controller's address file; .group(0) would then raise.
        remote_nvme_ip = re.search(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                                   self.exec_cmd(["cat", "/sys/class/nvme/%s/address" % nvme_ctrl]))
        return self.get_route_nic_numa(remote_nvme_ip.group(0))

    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1, offset=False, offset_inc=0):
        """Build the fio "[filenameN]" job sections, distributing the
        connected NVMe devices across *threads* sections.

        Each section gets an iodepth proportional to the number of disks it
        owns (at least 1), optional offset options, and NUMA pinning options.
        """
        # Generate connected nvme devices names and sort them by used NIC numa node
        # to allow better grouping when splitting into fio sections.
        nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
        nvme_numas = [self.get_nvme_subsystem_numa(x) for x in nvme_list]
        nvme_list = [x for _, x in sorted(zip(nvme_numas, nvme_list))]

        filename_section = ""
        # Round-robin split: each section gets nvme_per_split devices, and the
        # first `remainder` sections get one extra.
        nvme_per_split = int(len(nvme_list) / len(threads))
        remainder = len(nvme_list) % len(threads)
        iterator = iter(nvme_list)
        result = []
        for i in range(len(threads)):
            result.append([])
            for _ in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            # Scale per-section queue depth by disk count and job count.
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd

            offset_section = ""
            if offset:
                offset_section = self.gen_fio_offset_section(offset_inc, num_jobs)

            numa_opts = self.gen_fio_numa_section(r)

            filename_section = "\n".join([filename_section, header, disks, iodepth, numa_opts, offset_section, ""])

        return filename_section
|
|
|
|
|
|
|
|
|
|
|
|
class SPDKInitiator(Initiator):
    """Initiator host that uses the SPDK fio bdev plugin instead of the
    kernel NVMe driver; the plugin connects to the target just before fio
    starts IO, driven by a generated bdev.conf JSON file.
    """

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Build SPDK on the initiator unless explicitly skipped.
        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
            self.install_spdk()

        # Optional fields
        self.enable_data_digest = False
        if "enable_data_digest" in initiator_config:
            self.enable_data_digest = initiator_config["enable_data_digest"]
        if "num_cores" in initiator_config:
            self.num_cores = initiator_config["num_cores"]

    def install_spdk(self):
        """Clean, configure and build SPDK (with RDMA and the fio plugin)
        on the initiator, then run setup.sh to bind devices.
        """
        self.log.info("Using fio binary %s" % self.fio_bin)
        self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
        self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
        self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
        self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
        self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])

        self.log.info("SPDK built")
        self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])

    def init_connect(self):
        # Not a real "connect" like when doing "nvme connect" because SPDK's fio
        # bdev plugin initiates connection just before starting IO traffic.
        # This is just to have a "init_connect" equivalent of the same function
        # from KernelInitiator class.
        # Just prepare bdev.conf JSON file for later use and consider it
        # "making a connection".
        bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
        self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])

    def init_disconnect(self):
        # SPDK Initiator does not need to explicitly disconnect as this gets done
        # after fio bdev plugin finishes IO.
        pass

    def gen_spdk_bdev_conf(self, remote_subsystem_list):
        """Return a JSON (string) bdev subsystem config with one
        bdev_nvme_attach_controller entry per remote subsystem.

        Each entry in *remote_subsystem_list* is a (port, nqn, address)
        tuple; controllers are named Nvme0, Nvme1, ...
        """
        bdev_cfg_section = {
            "subsystems": [
                {
                    "subsystem": "bdev",
                    "config": []
                }
            ]
        }

        for i, subsys in enumerate(remote_subsystem_list):
            # str is applied directly; a lambda wrapper around it was redundant.
            sub_port, sub_nqn, sub_addr = map(str, subsys)
            nvme_ctrl = {
                "method": "bdev_nvme_attach_controller",
                "params": {
                    "name": "Nvme{}".format(i),
                    "trtype": self.transport,
                    "traddr": sub_addr,
                    "trsvcid": sub_port,
                    "subnqn": sub_nqn,
                    "adrfam": "IPv4"
                }
            }

            if self.enable_adq:
                nvme_ctrl["params"].update({"priority": "1"})

            if self.enable_data_digest:
                nvme_ctrl["params"].update({"ddgst": self.enable_data_digest})

            bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)

        return json.dumps(bdev_cfg_section, indent=2)

    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1, offset=False, offset_inc=0):
        """Build the fio "[filenameN]" job sections, distributing the
        expected NVMe bdev names (Nvme0n1, Nvme1n1, ...) across *threads*
        sections with proportional iodepth, optional offsets, and NUMA
        pinning options.
        """
        filename_section = ""
        # Never create more sections than there are subsystems.
        if len(threads) >= len(subsystems):
            threads = range(0, len(subsystems))

        # Generate expected NVMe Bdev names and sort them by used NIC numa node
        # to allow better grouping when splitting into fio sections.
        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
        filename_numas = [self.get_nvme_subsystem_numa(x) for x in filenames]
        filenames = [x for _, x in sorted(zip(filename_numas, filenames))]

        # Round-robin split: each section gets nvme_per_split bdevs, and the
        # first `remainder` sections get one extra.
        nvme_per_split = int(len(subsystems) / len(threads))
        remainder = len(subsystems) % len(threads)
        iterator = iter(filenames)
        result = []
        for i in range(len(threads)):
            result.append([])
            for _ in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            # Scale per-section queue depth by bdev count and job count.
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd

            offset_section = ""
            if offset:
                offset_section = self.gen_fio_offset_section(offset_inc, num_jobs)

            numa_opts = self.gen_fio_numa_section(r)

            filename_section = "\n".join([filename_section, header, disks, iodepth, numa_opts, offset_section, ""])

        return filename_section

    def get_nvme_subsystem_numa(self, bdev_name):
        """Return the NUMA node of the NIC used to reach the target that
        backs bdev *bdev_name*, looked up via the generated bdev.conf.
        """
        bdev_conf_json_obj = json.loads(self.exec_cmd(["cat", "%s/bdev.conf" % self.spdk_dir]))
        bdev_conf_json_obj = bdev_conf_json_obj["subsystems"][0]["config"]

        # Remove two last characters to get controller name instead of subsystem name
        nvme_ctrl = bdev_name[:-2]
        remote_nvme_ip = list(filter(lambda x: x["params"]["name"] == "%s" % nvme_ctrl, bdev_conf_json_obj))[0]["params"]["traddr"]
        return self.get_route_nic_numa(remote_nvme_ip)
|
|
|
|
|
2018-10-14 19:51:14 +00:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    script_full_dir = os.path.dirname(os.path.realpath(__file__))
    default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
                        help='Configuration file.')
    parser.add_argument('-r', '--results', type=str, default='/tmp/results',
                        help='Results directory.')
    parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
                        help='CSV results filename.')
    parser.add_argument('-f', '--force', default=False, action='store_true',
                        dest='force', help="""Force script to continue and try to use all
                        available NVMe devices during test.
                        WARNING: Might result in data loss on used NVMe drives""")

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='[%(name)s:%(funcName)s:%(lineno)d] %(message)s')

    # Lazy %-args: let the logging module do the formatting only when the
    # record is actually emitted.
    logging.info("Using config file: %s", args.config)
    with open(args.config, "r") as config:
        data = json.load(config)

    initiators = []
    fio_cases = []

    general_config = data["general"]
    target_config = data["target"]
    initiator_configs = [data[x] for x in data.keys() if "initiator" in x]

    # Refuse to run on all attached NVMes unless the user either defined an
    # allowlist/blocklist, uses null block devices, or passed --force.
    if "null_block_devices" not in data["target"] and \
        (args.force is False and
            "allowlist" not in data["target"] and
            "blocklist" not in data["target"]):
        # TODO: Also check if allowlist or blocklist are not empty.
        logging.warning("""WARNING: This script requires allowlist and blocklist to be defined.
        You can choose to use all available NVMe drives on your system, which may potentially
        lead to data loss. If you wish to proceed with all attached NVMes, use "-f" option.""")
        # sys.exit() instead of the exit() builtin; exit() is injected by the
        # site module and is not guaranteed to exist in all environments.
        sys.exit(1)

    # Instantiate the target object and the initiator objects, and collect
    # the fio workload matrix from the config sections.
    for k, v in data.items():
        if "target" in k:
            v.update({"results_dir": args.results})
            if data[k]["mode"] == "spdk":
                target_obj = SPDKTarget(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                target_obj = KernelTarget(k, data["general"], v)
        elif "initiator" in k:
            if data[k]["mode"] == "spdk":
                init_obj = SPDKInitiator(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                init_obj = KernelInitiator(k, data["general"], v)
            initiators.append(init_obj)
        elif "fio" in k:
            fio_workloads = itertools.product(data[k]["bs"],
                                              data[k]["qd"],
                                              data[k]["rw"])

            fio_run_time = data[k]["run_time"]
            fio_ramp_time = data[k]["ramp_time"]
            fio_rw_mix_read = data[k]["rwmixread"]
            fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None

            fio_rate_iops = 0
            if "rate_iops" in data[k]:
                fio_rate_iops = data[k]["rate_iops"]

            fio_offset = False
            if "offset" in data[k]:
                fio_offset = data[k]["offset"]
            fio_offset_inc = 0
            if "offset_inc" in data[k]:
                fio_offset_inc = data[k]["offset_inc"]
        else:
            continue

    # Replaces the try/os.mkdir/except FileExistsError dance.
    os.makedirs(args.results, exist_ok=True)

    for i in initiators:
        target_obj.initiator_info.append(
            {"name": i.name, "target_nic_ips": i.target_nic_ips, "initiator_nic_ips": i.nic_ips}
        )

    # TODO: This try block is definitely too large. Need to break this up into separate
    # logical blocks to reduce size.
    try:
        target_obj.tgt_start()

        for i in initiators:
            i.match_subsystems(target_obj.subsystem_info_list)
            if i.enable_adq:
                i.adq_configure_tc()

        # Poor mans threading
        # Run FIO tests
        for block_size, io_depth, rw in fio_workloads:
            configs = []
            for i in initiators:
                i.init_connect()
                cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
                                       fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops,
                                       fio_offset, fio_offset_inc)
                configs.append(cfg)

            for run_no in range(1, fio_run_num+1):
                threads = []
                power_daemon = None
                measurements_prefix = "%s_%s_%s_m_%s_run_%s" % (block_size, io_depth, rw, fio_rw_mix_read, run_no)

                # One fio thread per initiator, plus optional measurement
                # threads on the target side; all are started together and
                # joined before the next run.
                for i, cfg in zip(initiators, configs):
                    t = threading.Thread(target=i.run_fio, args=(cfg, run_no))
                    threads.append(t)
                if target_obj.enable_sar:
                    sar_file_prefix = measurements_prefix + "_sar"
                    t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_prefix, fio_ramp_time, fio_run_time))
                    threads.append(t)

                if target_obj.enable_pcm:
                    pcm_fnames = ["%s_%s.csv" % (measurements_prefix, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]

                    pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm,
                                                 args=(args.results, pcm_fnames[0], fio_ramp_time, fio_run_time))
                    pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory,
                                                 args=(args.results, pcm_fnames[1], fio_ramp_time, fio_run_time))
                    pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power,
                                                 args=(args.results, pcm_fnames[2], fio_ramp_time, fio_run_time))

                    threads.append(pcm_cpu_t)
                    threads.append(pcm_mem_t)
                    threads.append(pcm_pow_t)

                if target_obj.enable_bw:
                    bandwidth_file_name = measurements_prefix + "_bandwidth.csv"
                    t = threading.Thread(target=target_obj.measure_network_bandwidth,
                                         args=(args.results, bandwidth_file_name, fio_ramp_time, fio_run_time))
                    threads.append(t)

                if target_obj.enable_dpdk_memory:
                    dpdk_mem_file_name = measurements_prefix + "_dpdk_mem.txt"
                    t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results, dpdk_mem_file_name, fio_ramp_time))
                    threads.append(t)

                if target_obj.enable_pm:
                    power_daemon = threading.Thread(target=target_obj.measure_power,
                                                    args=(args.results, measurements_prefix, script_full_dir,
                                                          fio_ramp_time, fio_run_time))
                    threads.append(power_daemon)

                if target_obj.enable_adq:
                    ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
                    threads.append(ethtool_thread)

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()

            for i in initiators:
                i.init_disconnect()
                i.copy_result_files(args.results)
        try:
            parse_results(args.results, args.csv_filename)
        except Exception:
            logging.error("There was an error with parsing the results")
    finally:
        for i in initiators:
            try:
                i.stop()
            except Exception as err:
                # Best-effort teardown: log the failure instead of silently
                # swallowing it, then keep stopping the remaining initiators.
                logging.error("Failed to stop initiator %s: %s", i.name, err)
        target_obj.stop()
|