diff --git a/CHANGELOG.md b/CHANGELOG.md index 6991afaae..a182f4c73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,13 @@ is no need to use syntax like "python ./scripts/rpc.py". All executable scripts must contain proper shebang pointing to the right interpreter. Scripts without shebang musn't be executable. +A Python script has been added to enable conversion of old INI config file +to new JSON-RPC config file format. This script can be found at +scripts/config_converter.py. Example how this script can be used: +~~~{.sh} +cat old_format.ini | scripts/config_converter.py > new_json_format.json +~~~ + ## v18.07: ### bdev diff --git a/autotest.sh b/autotest.sh index d9acb897d..7d878058f 100755 --- a/autotest.sh +++ b/autotest.sh @@ -102,6 +102,10 @@ if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then fi fi +if [ $SPDK_TEST_JSON -eq 1 ]; then + run_test suite test/config_converter/test_converter.sh +fi + if [ $SPDK_TEST_EVENT -eq 1 ]; then run_test suite test/event/event.sh fi diff --git a/scripts/config_converter.py b/scripts/config_converter.py new file mode 100755 index 000000000..d78541a78 --- /dev/null +++ b/scripts/config_converter.py @@ -0,0 +1,701 @@ +#!/usr/bin/python + +import configparser +import re +import sys +import json +from collections import OrderedDict + +bdev_dict = OrderedDict() +bdev_dict["set_bdev_options"] = [] +bdev_dict["construct_split_vbdev"] = [] +bdev_dict["set_bdev_nvme_options"] = [] +bdev_dict["construct_nvme_bdev"] = [] +bdev_dict["set_bdev_nvme_hotplug"] = [] +bdev_dict["construct_malloc_bdev"] = [] +bdev_dict["construct_aio_bdev"] = [] +bdev_dict["construct_pmem_bdev"] = [] +bdev_dict["construct_virtio_dev"] = [] + +vhost_dict = OrderedDict() +vhost_dict["construct_vhost_scsi_controller"] = [] +vhost_dict["construct_vhost_blk_controller"] = [] +vhost_dict["construct_vhost_nvme_controller"] = [] + +iscsi_dict = OrderedDict() +iscsi_dict["set_iscsi_options"] = [] +iscsi_dict["add_portal_group"] = [] +iscsi_dict["add_initiator_group"] = [] +iscsi_dict["construct_target_node"] = [] + +nvmf_dict = OrderedDict() +nvmf_dict["set_nvmf_target_config"] = [] +nvmf_dict["set_nvmf_target_options"] = [] +nvmf_dict["construct_nvmf_subsystem"] = [] + + +# dictionary with new config that will be written to new json config file +subsystem = { + "copy": None, + "interface": None, + "net_framework": None, + "bdev": bdev_dict, + "scsi": [], + "nvmf": nvmf_dict, + "nbd": [], + "vhost": vhost_dict, + "iscsi": iscsi_dict +} + + +class OptionOrderedDict(OrderedDict): + def __setitem__(self, option, value): + if option in self and isinstance(value, list): + self[option].extend(value) + return + super(OptionOrderedDict, self).__setitem__(option, value) + + +no_yes_map = {"no": False, "No": False, "Yes": True, "yes": True} + + +def generate_new_json_config(): + json_subsystem = [ + {'subsystem': "copy", 'config': None}, + {"subsystem": "interface", "config": None}, + {"subsystem": "net_framework", "config": None}, + {"subsystem": "bdev", "config": []}, + {"subsystem": "scsi", "config": None}, + {"subsystem": "nvmf", "config": []}, + {"subsystem": "nbd", "config": []}, + {"subsystem": "vhost", "config": []}, + {"subsystem": "iscsi", "config": []} + ] + for method in subsystem['bdev']: + for item in subsystem['bdev'][method]: + json_subsystem[3]['config'].append(item) + for item in subsystem['scsi']: + if json_subsystem[4]['config'] is None: + json_subsystem[4]['config'] = [] + json_subsystem[4]['config'].append(item) + for method in subsystem['nvmf']: + for item in 
subsystem['nvmf'][method]: + json_subsystem[5]['config'].append(item) + for method in subsystem['vhost']: + for item in subsystem['vhost'][method]: + json_subsystem[7]['config'].append(item) + for method in subsystem['iscsi']: + for item in subsystem['iscsi'][method]: + json_subsystem[8]['config'].append(item) + + return {"subsystems": json_subsystem} + + +section_to_subsystem = { + "Bdev": subsystem['bdev'], + "AIO": subsystem['bdev'], + "Malloc": subsystem['bdev'], + "Nvme": subsystem['bdev'], + "Pmem": subsystem['bdev'], + "Split": subsystem['bdev'], + "Nvmf": subsystem['nvmf'], + "Subsystem": subsystem['nvmf'], + "VhostScsi": subsystem['vhost'], + "VhostBlk": subsystem['vhost'], + "VhostNvme": subsystem['vhost'], + "VirtioUser": subsystem['bdev'], + "iSCSI": subsystem['iscsi'], + "PortalGroup": subsystem['iscsi'], + "InitiatorGroup": subsystem['iscsi'], + "TargetNode": subsystem['iscsi'] +} + + +def set_param(params, cfg_name, value): + for param in params: + if param[0] != cfg_name: + continue + if param[1] == "disable_chap": + param[3] = True if value == "None" else False + elif param[1] == "require_chap": + param[3] = True if value in ["CHAP", "Mutual"] else False + elif param[1] == "mutual_chap": + param[3] = True if value == "Mutual" else False + elif param[1] == "chap_group": + param[3] = int(value.replace("AuthGroup", "")) + elif param[2] == bool: + param[3] = True if value in ("yes", "true", "Yes") else False + elif param[2] == "hex": + param[3] = str(int(value, 16)) + elif param[2] == int: + param[3] = int(value) + elif param[2] == list: + param[3].append(value) + elif param[2] == "dev_type": + if value.lower() == "blk": + param[3] = "blk" + else: + param[3] = param[2](value.replace("\"", "")) + + +def to_json_params(params): + out = {} + for param in params: + if param[3] is not None: + out[param[1]] = param[3] + return out + + +def get_bdev_options_json(config, section): + params = [ + ["BdevIoPoolSize", "bdev_io_pool_size", int, 65536], + ["BdevIoCacheSize", "bdev_io_cache_size", int, 256] + ] + for option in config.options("Bdev"): + set_param(params, option, config.get("Bdev", option)) + + return [{"params": to_json_params(params), "method": "set_bdev_options"}] + + +def get_aio_bdev_json(config, section): + aio_json = [] + value = None + for option in config.options("AIO"): + if option == "AIO": + value = config.get("AIO", option).split("\n") + for item in value: + items = re.findall("\S+", item) + params = {} + params['filename'] = items[0] + params['name'] = items[1] + if len(items) == 3: + params['block_size'] = int(items[2]) + aio_json.append({ + "params": params, + "method": "construct_aio_bdev" + }) + + return aio_json + + +def get_malloc_bdev_json(config, section): + malloc_json = [] + params = [ + ['NumberOfLuns', '', int, -1], + ['LunSizeInMB', '', int, 20], + ['BlockSize', '', int, 512] + ] + for option in config.options("Malloc"): + set_param(params, option, config.get("Malloc", option)) + for lun in range(0, params[0][3]): + malloc_json.append({ + "params": { + "block_size": params[2][3], + "num_blocks": params[1][3] * 1024 * 1024 / params[2][3], + "name": "Malloc%s" % lun + }, + "method": "construct_malloc_bdev" + }) + + return malloc_json + + +def get_nvme_bdev_json(config, section): + params = [ + ["RetryCount", "retry_count", int, 4], + ["TimeoutuSec", "timeout_us", int, 0], + ["AdminPollRate", "nvme_adminq_poll_period_us", int, 1000000], + ["ActionOnTimeout", "action_on_timeout", str, "none"], + ["HotplugEnable", "enable", bool, False], + 
["AdminPollRate", "period_us", int, 1000] + ] + nvme_json = [] + for option in config.options("Nvme"): + value = config.get("Nvme", option) + if "TransportID" == option: + entry = re.findall("\S+", value) + nvme_name = entry[-1] + trtype = re.findall("trtype:\S+", value) + if trtype: + trtype = trtype[0].replace("trtype:", "").replace("\"", "") + traddr = re.findall("traddr:\S+", value) + if traddr: + traddr = traddr[0].replace("traddr:", "").replace("\"", "") + nvme_json.append({ + "params": { + "trtype": trtype, + "name": nvme_name, + "traddr": traddr + }, + "method": "construct_nvme_bdev" + }) + else: + set_param(params, option, value) + params[3][3] = params[3][3].lower() + params[5][3] = params[5][3] * 100 + nvme_json.append({ + "params": to_json_params(params[4:6]), + "method": "set_bdev_nvme_hotplug" + }) + nvme_json.append({ + "params": to_json_params(params[0:4]), + "method": "set_bdev_nvme_options" + }) + return nvme_json + + +def get_pmem_bdev_json(config, section): + pmem_json = [] + for option in config.options(section): + if "Blk" == option: + for value in config.get(section, option).split("\n"): + items = re.findall("\S+", value) + pmem_json.append({ + "params": { + "name": items[1], + "pmem_file": items[0] + }, + "method": "construct_pmem_bdev" + }) + + return pmem_json + + +def get_split_bdev_json(config, section): + split_json = [] + value = [] + for option in config.options("Split"): + if "Split" == option: + value = config.get("Split", option) + if value and not isinstance(value, list): + value = [value] + for split in value: + items = re.findall("\S+", split) + split_size_mb = 0 + base_bdev = items[0] + split_count = int(items[1]) + if len(items) == 3: + split_size_mb = items[2] + split_json.append({ + "params": { + "base_bdev": base_bdev, + "split_size_mb": split_size_mb, + "split_count": split_count + }, + "method": "construct_split_vbdev" + }) + + return split_json + + +def get_nvmf_options_json(config, section): + params = [ + ["AcceptorPollRate", "acceptor_poll_rate", int, 10000], + ["MaxQueuesPerSession", "max_qpairs_per_ctrlr", int, 64], + ["MaxQueueDepth", "max_queue_depth", int, 128], + ["IncapsuleDataSize", "in_capsule_data_size", int, 4096], + ["MaxIOSize", "max_io_size", int, 131072], + ["IOUnitSize", "io_unit_size", int, 131072], + ["MaxSubsystems", "max_subsystems", int, 1024] + ] + for option in config.options("Nvmf"): + set_param(params, option, config.get("Nvmf", option)) + nvmf_json = [] + nvmf_json.append({ + "params": to_json_params([params[0]]), + "method": "set_nvmf_target_config" + }) + nvmf_json.append({ + "params": to_json_params(params[1:7]), + "method": "set_nvmf_target_options" + }) + + return nvmf_json + + +def get_nvmf_subsystem_json(config, section): + params = [ + ["NQN", "nqn", str, ""], + ["Host", "hosts", list, []], + ["AllowAnyHost", "allow_any_host", bool, True], + ["SN", "serial_number", str, ""], + ["MaxNamespaces", "max_namespaces", str, ""], + ] + listen_address = [] + namespaces = [] + nsid = 0 + searched_items = [param[0] for param in params] + for option in config.options(section): + value = config.get(section, option) + if option in searched_items: + set_param(params, option, value) + continue + if "Listen" == option: + items = re.findall("\S+", value) + adrfam = "IPv4" + if len(items[1].split(":")) > 2: + adrfam = "IPv6" + listen_address.append({ + "trtype": items[0], + "adrfam": adrfam, + "trsvcid": items[1].rsplit(":", 1)[-1], + "traddr": items[1].rsplit(":", 1)[0].replace( + "]", "").replace("[", "") + }) + if 
"Namespace" == option: + for item in value.split("\n"): + items = re.findall("\S+", item) + if len(items) == 2: + nsid = items[1] + else: + nsid += 1 + namespaces.append({ + "nsid": int(nsid), + "bdev_name": items[0], + }) + parameters = to_json_params(params[0:4]) + parameters['listen_addresses'] = listen_address + parameters['namespaces'] = namespaces + nvmf_subsystem = { + "params": parameters, + "method": "construct_nvmf_subsystem" + } + + if params[4][3]: + nvmf_subsystem['params']['max_namespaces'] = int(params[4][3]) + + return [nvmf_subsystem] + + +def get_vhost_scsi_json(config, section): + params = [ + ["Name", "ctrlr", str, None], + ["Cpumask", "cpumask", "hex", "1"], + ] + targets = [] + vhost_scsi_json = [] + for option in config.options(section): + value = config.get(section, option) + if option in ["Name", "Cpumask"]: + set_param(params, option, value) + if "Target" == option: + for item in value.split("\n"): + items = re.findall("\S+", item) + targets.append({ + "scsi_target_num": int(items[0]), + "ctrlr": params[0][3], + "bdev_name": items[1] + }) + vhost_scsi_json.append({ + "params": to_json_params(params), + "method": "construct_vhost_scsi_controller" + }) + for target in targets: + vhost_scsi_json.append({ + "params": target, + "method": "add_vhost_scsi_lun" + }) + + return vhost_scsi_json + + +def get_vhost_blk_json(config, section): + params = [ + ["ReadOnly", "readonly", bool, False], + ["Dev", "dev_name", str, ""], + ["Name", "ctrlr", str, ""], + ["Cpumask", "cpumask", "hex", ""] + ] + for option in config.options(section): + set_param(params, option, config.get(section, option)) + return [{"method": "construct_vhost_blk_controller", + "params": to_json_params(params)}] + + +def get_vhost_nvme_json(config, section): + params = [ + ["Name", "ctrlr", str, ""], + ["NumberOfQueues", "io_queues", int, -1], + ["Cpumask", "cpumask", "hex", 0x1], + ["Namespace", "bdev_name", list, []] + ] + for option in config.options(section): + values = config.get(section, option).split("\n") + for value in values: + set_param(params, option, value) + vhost_nvme_json = [] + vhost_nvme_json.append({ + "params": to_json_params(params[:3]), + "method": "construct_vhost_nvme_controller" + }) + for namespace in params[3][3]: + vhost_nvme_json.append({ + "params": { + "ctrlr": params[0][3], + "bdev_name": namespace, + }, + "method": "add_vhost_nvme_ns" + }) + + return vhost_nvme_json + + +def get_virtio_user_json(config, section): + params = [ + ["Path", "traddr", str, ""], + ["Queues", "vq_count", int, 1], + ["Type", "dev_type", "dev_type", "scsi"], + ["Name", "name", str, section], + # Define parameters with default values. + # These params are set by rpc commands and + # do not occur in ini config file. + # But they are visible in json config file + # with default values even if not set by rpc. 
+ [None, "trtype", str, "user"], + [None, "vq_size", int, 512] + ] + for option in config.options(section): + set_param(params, option, config.get(section, option)) + dev_name = "Scsi" + if params[2][3] == "blk": + dev_name = "Blk" + params[3][3] = params[3][3].replace("User", dev_name) + + return [{ + "params": to_json_params(params), + "method": "construct_virtio_dev" + }] + + +def get_iscsi_options_json(config, section): + params = [ + ['AllowDuplicateIsid', 'allow_duplicated_isid', bool, False], + ['DefaultTime2Retain', 'default_time2retain', int, 20], + ['DiscoveryAuthMethod', 'mutual_chap', bool, False], + ['MaxConnectionsPerSession', 'max_connections_per_session', int, 2], + ['Timeout', 'nop_timeout', int, 60], + ['DiscoveryAuthMethod', 'disable_chap', bool, False], + ['DiscoveryAuthMethod', 'require_chap', bool, False], + ['NodeBase', 'node_base', str, "iqn.2016-06.io.spdk"], + ['AuthFile', 'auth_file', str, None], + ['DiscoveryAuthGroup', 'chap_group', int, 0], + ['MaxSessions', 'max_sessions', int, 128], + ['ImmediateData', 'immediate_data', bool, True], + ['ErrorRecoveryLevel', 'error_recovery_level', int, 0], + ['NopInInterval', 'nop_in_interval', int, 30], + ['MinConnectionsPerCore', 'min_connections_per_core', int, 4], + ['DefaultTime2Wait', 'default_time2wait', int, 2], + ['QueueDepth', 'max_queue_depth', int, 64], + ['', 'first_burst_length', int, 8192] + ] + for option in config.options(section): + set_param(params, option, config.get(section, option)) + return [{"method": "set_iscsi_options", "params": to_json_params(params)}] + + +def get_iscsi_portal_group_json(config, name): + portal_group_json = [] + portals = [] + for option in config.options(name): + if "Portal" == option: + for value in config.get(name, option).split("\n"): + items = re.findall("\S+", value) + portal = {'host': items[1].rsplit(":", 1)[0]} + if "@" in items[1]: + portal['port'] =\ + items[1].rsplit(":", 1)[1].split("@")[0] + portal['cpumask'] =\ + items[1].rsplit(":", 1)[1].split("@")[1] + else: + portal['port'] = items[1].rsplit(":", 1)[1] + portals.append(portal) + + portal_group_json.append({ + "params": { + "portals": portals, + "tag": int(re.findall('\d+', name)[0]) + }, + "method": "add_portal_group" + }) + + return portal_group_json + + +def get_iscsi_initiator_group_json(config, name): + initiators = [] + netmasks = [] + + for option in config.options(name): + if "InitiatorName" == option: + initiators.append(config.get(name, option)) + if "Netmask" == option: + netmasks.append(config.get(name, option)) + initiator_group_json = { + "params": { + "initiators": initiators, + "tag": int(re.findall('\d+', name)[0]), + "netmasks": netmasks + }, + "method": "add_initiator_group" + } + + return [initiator_group_json] + + +def get_iscsi_target_node_json(config, section): + luns = [] + mutual_chap = False + name = "" + alias_name = "" + require_chap = False + chap_group = 1 + pg_ig_maps = [] + data_digest = False + disable_chap = False + header_digest = False + queue_depth = 64 + + for option in config.options(section): + value = config.get(section, option) + if "TargetName" == option: + name = value + if "TargetAlias" == option: + alias_name = value.replace("\"", "") + if "Mapping" == option: + items = re.findall("\S+", value) + pg_ig_maps.append({ + "ig_tag": int(re.findall('\d+', items[1])[0]), + "pg_tag": int(re.findall('\d+', items[0])[0]) + }) + if "AuthMethod" == option: + items = re.findall("\S+", value) + for item in items: + if "CHAP" == item: + require_chap = True + elif "Mutual" == item: 
+ mutual_chap = True + elif "Auto" == item: + disable_chap = False + require_chap = False + mutual_chap = False + elif "None" == item: + disable_chap = True + require_chap = False + mutual_chap = False + if "AuthGroup" == option: # AuthGroup1 + items = re.findall("\S+", value) + chap_group = int(re.findall('\d+', items[0])[0]) + if "UseDigest" == option: + items = re.findall("\S+", value) + for item in items: + if "Header" == item: + header_digest = True + elif "Data" == item: + data_digest = True + elif "Auto" == item: + header_digest = False + data_digest = False + + if re.match("LUN\d+", option): + luns.append({"lun_id": len(luns), + "bdev_name": value}) + if "QueueDepth" == option: + queue_depth = int(value) + + params = {"alias_name": alias_name} + params["name"] = "iqn.2016-06.io.spdk:%s" % name + params["luns"] = luns + params["pg_ig_maps"] = pg_ig_maps + params["queue_depth"] = queue_depth + params["chap_group"] = chap_group + params["header_digest"] = header_digest + params["mutual_chap"] = mutual_chap + params["require_chap"] = require_chap + params["data_digest"] = data_digest + params["disable_chap"] = disable_chap + + target_json = { + "params": params, + "method": "construct_target_node" + } + + return [target_json] + + +if __name__ == "__main__": + try: + config = configparser.ConfigParser(strict=False, delimiters=(' '), + dict_type=OptionOrderedDict, + allow_no_value=True) + # Do not parse options and values. Capital letters are relevant. + config.optionxform = str + config.read_file(sys.stdin) + except Exception as e: + print("Exception while parsing config: %s" % e) + exit(1) + # Add missing sections to generate default configuration + for section in ['Nvme', 'Nvmf', 'Bdev', 'iSCSI']: + if section not in config.sections(): + config.add_section(section) + + for section in config.sections(): + match = re.match("(Bdev|Nvme|Malloc|VirtioUser\d+|Split|Pmem|AIO|" + "iSCSI|PortalGroup\d+|InitiatorGroup\d+|" + "TargetNode\d+|Nvmf|Subsystem\d+|VhostScsi\d+|" + "VhostBlk\d+|VhostNvme\d+)", section) + if match: + match_section = ''.join(letter for letter in match.group(0) + if not letter.isdigit()) + if match_section == "Bdev": + items = get_bdev_options_json(config, section) + elif match_section == "AIO": + items = get_aio_bdev_json(config, section) + elif match_section == "Malloc": + items = get_malloc_bdev_json(config, section) + elif match_section == "Nvme": + items = get_nvme_bdev_json(config, section) + elif match_section == "Pmem": + items = get_pmem_bdev_json(config, section) + elif match_section == "Split": + items = get_split_bdev_json(config, section) + elif match_section == "Nvmf": + items = get_nvmf_options_json(config, section) + elif match_section == "Subsystem": + items = get_nvmf_subsystem_json(config, section) + elif match_section == "VhostScsi": + items = get_vhost_scsi_json(config, section) + elif match_section == "VhostBlk": + items = get_vhost_blk_json(config, section) + elif match_section == "VhostNvme": + items = get_vhost_nvme_json(config, section) + elif match_section == "VirtioUser": + items = get_virtio_user_json(config, section) + elif match_section == "iSCSI": + items = get_iscsi_options_json(config, section) + elif match_section == "PortalGroup": + items = get_iscsi_portal_group_json(config, section) + elif match_section == "InitiatorGroup": + items = get_iscsi_initiator_group_json(config, section) + elif match_section == "TargetNode": + items = get_iscsi_target_node_json(config, section) + for item in items: + if match_section == "VhostScsi": + 
section_to_subsystem[match_section][ + "construct_vhost_scsi_controller"].append(item) + elif match_section == "VhostNvme": + section_to_subsystem[match_section][ + "construct_vhost_nvme_controller"].append(item) + else: + section_to_subsystem[match_section][ + item['method']].append(item) + elif section == "Global": + pass + elif section == "VirtioPci": + print("Please use spdk target flags.") + exit(1) + else: + print("An invalid section detected: %s.\n" + "Please revise your config file." % section) + exit(1) + json.dump(generate_new_json_config(), sys.stdout, indent=2) + print("") diff --git a/scripts/pkgdep.sh b/scripts/pkgdep.sh index 1cd375a1b..da7181e95 100755 --- a/scripts/pkgdep.sh +++ b/scripts/pkgdep.sh @@ -37,7 +37,6 @@ if [ -s /etc/redhat-release ]; then yum install -y doxygen mscgen graphviz # Additional dependencies for building pmem based backends yum install -y libpmemblk-devel || true - # Additional dependencies for SPDK CLI - not available in rhel and centos if ! echo "$ID $VERSION_ID" | egrep -q 'rhel 7|centos 7'; then yum install -y python3-configshell python3-pexpect diff --git a/test/common/autotest_common.sh b/test/common/autotest_common.sh index 3bd5c677e..badfb8860 100755 --- a/test/common/autotest_common.sh +++ b/test/common/autotest_common.sh @@ -55,6 +55,7 @@ fi : ${SPDK_TEST_VHOST_INIT=1}; export SPDK_TEST_VHOST_INIT : ${SPDK_TEST_PMDK=1}; export SPDK_TEST_PMDK : ${SPDK_TEST_LVOL=1}; export SPDK_TEST_LVOL +: ${SPDK_TEST_JSON=1}; export SPDK_TEST_JSON : ${SPDK_RUN_ASAN=1}; export SPDK_RUN_ASAN : ${SPDK_RUN_UBSAN=1}; export SPDK_RUN_UBSAN : ${SPDK_RUN_INSTALLED_DPDK=1}; export SPDK_RUN_INSTALLED_DPDK diff --git a/test/config_converter/config.ini b/test/config_converter/config.ini new file mode 100644 index 000000000..2d71f9824 --- /dev/null +++ b/test/config_converter/config.ini @@ -0,0 +1,151 @@ +#comment1 +[Global] + Comment "Global section"#comment2 + ReactorMask 0xF #comment3 +#comment4 + #comment5 +[Nvmf] + MaxQueuesPerSession 4 + MaxQueueDepth 128 + InCapsuleDataSize 4096 + MaxIOSize 131072 + AcceptorPollRate 10000 + IOUnitSize 131072 + +[Nvme] + TransportID "trtype:PCIe traddr:0000:00:04.0" Nvme0 + +[Bdev] + BdevIoPoolSize 65536 + BdevIoCacheSize 256 + +[Split] + Split Nvme0n1 8 + +[Nvme] + RetryCount 4 + TimeoutUsec 0 + ActionOnTimeout None + AdminPollRate 100000 + HotplugEnable Yes + +[iSCSI] + NodeBase "iqn.2016-06.io.spdk" + AuthFile /usr/local/etc/spdk/auth.conf + Timeout 30 + DiscoveryAuthMethod Auto + DiscoveryAuthGroup AuthGroup1 + MaxSessions 16 + ImmediateData Yes + ErrorRecoveryLevel 0 + MaxR2T 256 + NopInInterval 10 + AllowDuplicateIsid Yes + MinConnectionsPerCore 4 + DefaultTime2Wait 2 + QueueDepth 128 + +[Malloc] + NumberOfLuns 8 + LunSizeInMB 128 + BlockSize 4096 + +[Pmem] + Blk /tmp/sample_pmem Pmem0 + +[AIO] + AIO /tmp/sample_aio0 AIO0 2048 + AIO /tmp/sample_aio1 AIO1 2048 + AIO /tmp/sample_aio2 AIO2 2048 + AIO /tmp/sample_aio1 AIO3 2048 + AIO /tmp/sample_aio2 AIO4 2048 + +[VhostBlk0] + Name vhost.1 + Dev Malloc6 + ReadOnly yes + Cpumask 0x1 + +[VhostScsi0] + Name naa.vhost.0 + Target 0 Malloc4 + Target 1 AIO3 + Target 2 Nvme0n1p2 + # Target 3 Nvme1n1p2 + Cpumask 0x1 + +[VhostScsi1] + Name naa.vhost.1 + Target 0 AIO4 + Cpumask 0x1 + +[VhostBlk1] + Name naa.vhost.2 + Dev Malloc5 + ReadOnly no + Cpumask 0x1 + +[VhostNvme0] + Name naa.vhost.3 + NumberOfQueues 2 + Namespace Nvme0n1p0 + Namespace Nvme0n1p1 + Cpumask 0x1 + +[Subsystem1] + NQN nqn.2016-06.io.spdk:cnode1 + Listen RDMA 10.0.2.15:4420 + AllowAnyHost No + Host 
nqn.2016-06.io.spdk:init + SN SPDK00000000000001 + MaxNamespaces 20 + Namespace Nvme0n1p5 1 + Namespace Nvme0n1p6 2 + +[Subsystem2] + NQN nqn.2016-06.io.spdk:cnode2 + Listen RDMA 10.0.2.15:4421 + AllowAnyHost No + Host nqn.2016-06.io.spdk:init + SN SPDK00000000000002 + Namespace Malloc1 + Namespace Malloc2 + Namespace AIO0 + Namespace AIO1 + +[InitiatorGroup1] + InitiatorName ANY + Netmask 127.0.0.1/32 + +[PortalGroup1] + Portal DA1 127.0.0.1:4000 + Portal DA2 127.0.0.1:4001@0xF + +[TargetNode1] + TargetName disk1 + TargetAlias "Data Disk1" + Mapping PortalGroup1 InitiatorGroup1 + AuthMethod Auto + AuthGroup AuthGroup1 + # Enable header and data digest + # UseDigest Header Data + UseDigest Auto + # Use the first malloc target + LUN0 Malloc0 + # Using the first AIO target + LUN1 AIO2 + # Using the second storage target + LUN2 AIO3 + # Using the third storage target + LUN3 AIO4 + QueueDepth 128 + +[TargetNode2] + TargetName disk2 + TargetAlias "Data Disk2" + Mapping PortalGroup1 InitiatorGroup1 + AuthMethod Auto + AuthGroup AuthGroup1 + UseDigest Auto + LUN0 Nvme0n1p3 + QueueDepth 32 diff --git a/test/config_converter/config_virtio.ini b/test/config_converter/config_virtio.ini new file mode 100644 index 000000000..b2b7f4c71 --- /dev/null +++ b/test/config_converter/config_virtio.ini @@ -0,0 +1,21 @@ +[VirtioUser0] + Path naa.vhost.0 + Queues 8 + +[VirtioUser1] + Path naa.vhost.1 + Queues 8 + +#[VirtioUser2] +# Path naa.vhost.3 +# Queues 8 + +#[VirtioUser3] +# Path naa.vhost.2 +# Type Blk +# Queues 8 + +[VirtioUser4] + Path vhost.1 + Type Blk +# Queues 8 diff --git a/test/config_converter/spdk_config.json b/test/config_converter/spdk_config.json new file mode 100644 index 000000000..4b4ba5728 --- /dev/null +++ b/test/config_converter/spdk_config.json @@ -0,0 +1,481 @@ +{ + "subsystems": [ + { + "subsystem": "copy", + "config": null + }, + { + "subsystem": "interface", + "config": null + }, + { + "subsystem": "net_framework", + "config": null + }, + { + "subsystem": "bdev", + "config": [ + { + "params": { + "bdev_io_pool_size": 65536, + "bdev_io_cache_size": 256 + }, + "method": "set_bdev_options" + }, + { + "params": { + "base_bdev": "Nvme0n1", + "split_size_mb": 0, + "split_count": 8 + }, + "method": "construct_split_vbdev" + }, + { + "params": { + "retry_count": 4, + "timeout_us": 0, + "nvme_adminq_poll_period_us": 100000, + "action_on_timeout": "none" + }, + "method": "set_bdev_nvme_options" + }, + { + "params": { + "trtype": "PCIe", + "name": "Nvme0", + "traddr": "0000:00:04.0" + }, + "method": "construct_nvme_bdev" + }, + { + "params": { + "enable": true, + "period_us": 10000000 + }, + "method": "set_bdev_nvme_hotplug" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc0" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc1" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc2" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc3" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc4" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc5" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc6" + }, + "method": 
"construct_malloc_bdev" + }, + { + "params": { + "block_size": 4096, + "num_blocks": 32768, + "name": "Malloc7" + }, + "method": "construct_malloc_bdev" + }, + { + "params": { + "block_size": 2048, + "name": "AIO0", + "filename": "/tmp/sample_aio0" + }, + "method": "construct_aio_bdev" + }, + { + "params": { + "block_size": 2048, + "name": "AIO1", + "filename": "/tmp/sample_aio1" + }, + "method": "construct_aio_bdev" + }, + { + "params": { + "block_size": 2048, + "name": "AIO2", + "filename": "/tmp/sample_aio2" + }, + "method": "construct_aio_bdev" + }, + { + "params": { + "block_size": 2048, + "name": "AIO3", + "filename": "/tmp/sample_aio1" + }, + "method": "construct_aio_bdev" + }, + { + "params": { + "block_size": 2048, + "name": "AIO4", + "filename": "/tmp/sample_aio2" + }, + "method": "construct_aio_bdev" + }, + { + "params": { + "name": "Pmem0", + "pmem_file": "/tmp/sample_pmem" + }, + "method": "construct_pmem_bdev" + } + ] + }, + { + "subsystem": "scsi", + "config": null + }, + { + "subsystem": "nvmf", + "config": [ + { + "params": { + "acceptor_poll_rate": 10000 + }, + "method": "set_nvmf_target_config" + }, + { + "params": { + "in_capsule_data_size": 4096, + "io_unit_size": 131072, + "max_qpairs_per_ctrlr": 4, + "max_queue_depth": 128, + "max_io_size": 131072, + "max_subsystems": 1024 + }, + "method": "set_nvmf_target_options" + }, + { + "params": { + "max_namespaces": 20, + "listen_addresses": [ + { + "trtype": "RDMA", + "adrfam": "IPv4", + "trsvcid": "4420", + "traddr": "10.0.2.15" + } + ], + "hosts": [ + "nqn.2016-06.io.spdk:init" + ], + "namespaces": [ + { + "bdev_name": "Nvme0n1p5", + "nsid": 1 + }, + { + "bdev_name": "Nvme0n1p6", + "nsid": 2 + } + ], + "allow_any_host": false, + "serial_number": "SPDK00000000000001", + "nqn": "nqn.2016-06.io.spdk:cnode1" + }, + "method": "construct_nvmf_subsystem" + }, + { + "params": { + "listen_addresses": [ + { + "trtype": "RDMA", + "adrfam": "IPv4", + "trsvcid": "4421", + "traddr": "10.0.2.15" + } + ], + "hosts": [ + "nqn.2016-06.io.spdk:init" + ], + "namespaces": [ + { + "bdev_name": "Malloc1", + "nsid": 1 + }, + { + "bdev_name": "Malloc2", + "nsid": 2 + }, + { + "bdev_name": "AIO0", + "nsid": 3 + }, + { + "bdev_name": "AIO1", + "nsid": 4 + } + ], + "allow_any_host": false, + "serial_number": "SPDK00000000000002", + "nqn": "nqn.2016-06.io.spdk:cnode2" + }, + "method": "construct_nvmf_subsystem" + } + ] + }, + { + "subsystem": "nbd", + "config": [] + }, + { + "subsystem": "vhost", + "config": [ + { + "params": { + "cpumask": "1", + "ctrlr": "naa.vhost.0" + }, + "method": "construct_vhost_scsi_controller" + }, + { + "params": { + "scsi_target_num": 0, + "bdev_name": "Malloc4", + "ctrlr": "naa.vhost.0" + }, + "method": "add_vhost_scsi_lun" + }, + { + "params": { + "scsi_target_num": 1, + "bdev_name": "AIO3", + "ctrlr": "naa.vhost.0" + }, + "method": "add_vhost_scsi_lun" + }, + { + "params": { + "scsi_target_num": 2, + "bdev_name": "Nvme0n1p2", + "ctrlr": "naa.vhost.0" + }, + "method": "add_vhost_scsi_lun" + }, + { + "params": { + "cpumask": "1", + "ctrlr": "naa.vhost.1" + }, + "method": "construct_vhost_scsi_controller" + }, + { + "params": { + "scsi_target_num": 0, + "bdev_name": "AIO4", + "ctrlr": "naa.vhost.1" + }, + "method": "add_vhost_scsi_lun" + }, + { + "params": { + "dev_name": "Malloc6", + "readonly": true, + "ctrlr": "vhost.1", + "cpumask": "1" + }, + "method": "construct_vhost_blk_controller" + }, + { + "params": { + "dev_name": "Malloc5", + "readonly": false, + "ctrlr": "naa.vhost.2", + "cpumask": "1" + }, + "method": 
"construct_vhost_blk_controller" + }, + { + "params": { + "cpumask": "1", + "io_queues": 2, + "ctrlr": "naa.vhost.3" + }, + "method": "construct_vhost_nvme_controller" + }, + { + "params": { + "bdev_name": "Nvme0n1p0", + "ctrlr": "naa.vhost.3" + }, + "method": "add_vhost_nvme_ns" + }, + { + "params": { + "bdev_name": "Nvme0n1p1", + "ctrlr": "naa.vhost.3" + }, + "method": "add_vhost_nvme_ns" + } + ] + }, + { + "subsystem": "iscsi", + "config": [ + { + "params": { + "allow_duplicated_isid": true, + "default_time2retain": 20, + "mutual_chap": false, + "require_chap": false, + "immediate_data": true, + "node_base": "iqn.2016-06.io.spdk", + "nop_in_interval": 10, + "max_connections_per_session": 2, + "first_burst_length": 8192, + "max_queue_depth": 64, + "nop_timeout": 30, + "chap_group": 1, + "max_sessions": 16, + "error_recovery_level": 0, + "disable_chap": false, + "auth_file": "/usr/local/etc/spdk/auth.conf", + "min_connections_per_core": 4, + "default_time2wait": 2 + }, + "method": "set_iscsi_options" + }, + { + "params": { + "portals": [ + { + "cpumask": "0x1", + "host": "127.0.0.1", + "port": "4000" + }, + { + "cpumask": "0x1", + "host": "127.0.0.1", + "port": "4001" + } + ], + "tag": 1 + }, + "method": "add_portal_group" + }, + { + "params": { + "initiators": [ + "ANY" + ], + "tag": 1, + "netmasks": [ + "127.0.0.1/32" + ] + }, + "method": "add_initiator_group" + }, + { + "params": { + "luns": [ + { + "lun_id": 0, + "bdev_name": "Malloc0" + }, + { + "lun_id": 1, + "bdev_name": "AIO2" + }, + { + "lun_id": 2, + "bdev_name": "AIO3" + }, + { + "lun_id": 3, + "bdev_name": "AIO4" + } + ], + "mutual_chap": false, + "name": "iqn.2016-06.io.spdk:disk1", + "alias_name": "Data Disk1", + "require_chap": false, + "chap_group": 1, + "pg_ig_maps": [ + { + "ig_tag": 1, + "pg_tag": 1 + } + ], + "data_digest": false, + "disable_chap": false, + "header_digest": false, + "queue_depth": 64 + }, + "method": "construct_target_node" + }, + { + "params": { + "luns": [ + { + "lun_id": 0, + "bdev_name": "Nvme0n1p3" + } + ], + "mutual_chap": false, + "name": "iqn.2016-06.io.spdk:disk2", + "alias_name": "Data Disk2", + "require_chap": false, + "chap_group": 1, + "pg_ig_maps": [ + { + "ig_tag": 1, + "pg_tag": 1 + } + ], + "data_digest": false, + "disable_chap": false, + "header_digest": false, + "queue_depth": 32 + }, + "method": "construct_target_node" + } + ] + } + ] +} diff --git a/test/config_converter/spdk_config_virtio.json b/test/config_converter/spdk_config_virtio.json new file mode 100644 index 000000000..00391f0bb --- /dev/null +++ b/test/config_converter/spdk_config_virtio.json @@ -0,0 +1,138 @@ +{ + "subsystems": [ + { + "subsystem": "copy", + "config": null + }, + { + "subsystem": "interface", + "config": null + }, + { + "subsystem": "net_framework", + "config": null + }, + { + "subsystem": "bdev", + "config": [ + { + "params": { + "bdev_io_pool_size": 65536, + "bdev_io_cache_size": 256 + }, + "method": "set_bdev_options" + }, + { + "params": { + "retry_count": 4, + "timeout_us": 0, + "nvme_adminq_poll_period_us": 1000000, + "action_on_timeout": "none" + }, + "method": "set_bdev_nvme_options" + }, + { + "params": { + "enable": false, + "period_us": 100000 + }, + "method": "set_bdev_nvme_hotplug" + }, + { + "params": { + "name": "VirtioScsi0", + "dev_type": "scsi", + "vq_size": 512, + "trtype": "user", + "traddr": "naa.vhost.0", + "vq_count": 8 + }, + "method": "construct_virtio_dev" + }, + { + "params": { + "name": "VirtioScsi1", + "dev_type": "scsi", + "vq_size": 512, + "trtype": "user", + "traddr": 
"naa.vhost.1", + "vq_count": 8 + }, + "method": "construct_virtio_dev" + }, + { + "params": { + "name": "VirtioBlk4", + "dev_type": "blk", + "vq_size": 512, + "trtype": "user", + "traddr": "vhost.1", + "vq_count": 1 + }, + "method": "construct_virtio_dev" + } + ] + }, + { + "subsystem": "scsi", + "config": null + }, + { + "subsystem": "nvmf", + "config": [ + { + "params": { + "acceptor_poll_rate": 10000 + }, + "method": "set_nvmf_target_config" + }, + { + "params": { + "in_capsule_data_size": 4096, + "io_unit_size": 131072, + "max_qpairs_per_ctrlr": 64, + "max_queue_depth": 128, + "max_io_size": 131072, + "max_subsystems": 1024 + }, + "method": "set_nvmf_target_options" + } + ] + }, + { + "subsystem": "nbd", + "config": [] + }, + { + "subsystem": "vhost", + "config": [] + }, + { + "subsystem": "iscsi", + "config": [ + { + "params": { + "allow_duplicated_isid": false, + "default_time2retain": 20, + "mutual_chap": false, + "require_chap": false, + "immediate_data": true, + "node_base": "iqn.2016-06.io.spdk", + "nop_in_interval": 30, + "max_connections_per_session": 2, + "first_burst_length": 8192, + "max_queue_depth": 64, + "nop_timeout": 60, + "chap_group": 0, + "max_sessions": 128, + "error_recovery_level": 0, + "disable_chap": false, + "min_connections_per_core": 4, + "default_time2wait": 2 + }, + "method": "set_iscsi_options" + } + ] + } + ] +} diff --git a/test/config_converter/test_converter.sh b/test/config_converter/test_converter.sh new file mode 100755 index 000000000..5594df2d5 --- /dev/null +++ b/test/config_converter/test_converter.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +CONVERTER_DIR=$(readlink -f $(dirname $0)) +SPDK_BUILD_DIR=$CONVERTER_DIR/../../ +source $CONVERTER_DIR/../common/autotest_common.sh + +function test_cleanup() { + rm -f $CONVERTER_DIR/config_converter.json $CONVERTER_DIR/config_virtio_converter.json +} + +function on_error_exit() { + set +e + test_cleanup + print_backtrace + exit 1 +} + +trap 'on_error_exit' ERR + +cat $CONVERTER_DIR/config.ini | python3 $SPDK_BUILD_DIR/scripts/config_converter.py > $CONVERTER_DIR/config_converter.json +cat $CONVERTER_DIR/config_virtio.ini | python3 $SPDK_BUILD_DIR/scripts/config_converter.py > $CONVERTER_DIR/config_virtio_converter.json +diff -I "cpumask" -I "max_queue_depth" -I "queue_depth" <(jq -S . $CONVERTER_DIR/config_converter.json) <(jq -S . $CONVERTER_DIR/spdk_config.json) +diff <(jq -S . $CONVERTER_DIR/config_virtio_converter.json) <(jq -S . $CONVERTER_DIR/spdk_config_virtio.json) +test_cleanup