From 1e92d78a1023ec222076774a99ce2a4807cabf32 Mon Sep 17 00:00:00 2001 From: Ben Walker Date: Wed, 3 Aug 2016 14:37:16 -0700 Subject: [PATCH] iscsi: Add an iscsi target application Similar to our NVMf target, this is an iSCSI target that can interoperate with the Linux and Windows standard iSCSI initiators. Change-Id: I6961c5ef99f7b161c396330ed5b543ea29b0ca7b Signed-off-by: Ben Walker --- app/Makefile | 3 + app/iscsi_tgt/.gitignore | 1 + app/iscsi_tgt/Makefile | 79 +++++ app/iscsi_tgt/iscsi_tgt.c | 243 ++++++++++++++ autotest.sh | 15 + etc/spdk/iscsi.conf.in | 174 ++++++++++ lib/iscsi/conn.c | 4 +- lib/iscsi/conn.h | 1 - lib/iscsi/iscsi.h | 2 +- scripts/autotest_common.sh | 46 +++ scripts/fio.py | 120 +++++++ scripts/rpc.py | 316 ++++++++++++++++++ test/iscsi_tgt/filesystem/filesystem.sh | 99 ++++++ test/iscsi_tgt/filesystem/iscsi.conf | 15 + test/iscsi_tgt/fio/fio.sh | 103 ++++++ test/iscsi_tgt/fio/iscsi.conf | 18 + test/iscsi_tgt/fio/running_config.sh | 25 ++ test/iscsi_tgt/reset/iscsi.conf | 14 + test/iscsi_tgt/reset/reset.sh | 85 +++++ test/iscsi_tgt/rpc_config/iscsi.conf | 14 + test/iscsi_tgt/rpc_config/rpc_config.py | 416 ++++++++++++++++++++++++ test/iscsi_tgt/rpc_config/rpc_config.sh | 47 +++ 22 files changed, 1836 insertions(+), 4 deletions(-) create mode 100644 app/iscsi_tgt/.gitignore create mode 100644 app/iscsi_tgt/Makefile create mode 100644 app/iscsi_tgt/iscsi_tgt.c create mode 100644 etc/spdk/iscsi.conf.in create mode 100755 scripts/fio.py create mode 100755 scripts/rpc.py create mode 100755 test/iscsi_tgt/filesystem/filesystem.sh create mode 100644 test/iscsi_tgt/filesystem/iscsi.conf create mode 100755 test/iscsi_tgt/fio/fio.sh create mode 100644 test/iscsi_tgt/fio/iscsi.conf create mode 100755 test/iscsi_tgt/fio/running_config.sh create mode 100644 test/iscsi_tgt/reset/iscsi.conf create mode 100755 test/iscsi_tgt/reset/reset.sh create mode 100644 test/iscsi_tgt/rpc_config/iscsi.conf create mode 100755 test/iscsi_tgt/rpc_config/rpc_config.py create mode 100755 test/iscsi_tgt/rpc_config/rpc_config.sh diff --git a/app/Makefile b/app/Makefile index 57e052b52..94e067e36 100644 --- a/app/Makefile +++ b/app/Makefile @@ -36,6 +36,9 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk DIRS-y += trace DIRS-$(CONFIG_RDMA) += nvmf_tgt +ifeq ($(OS),Linux) +DIRS-y += iscsi_tgt +endif .PHONY: all clean $(DIRS-y) diff --git a/app/iscsi_tgt/.gitignore b/app/iscsi_tgt/.gitignore new file mode 100644 index 000000000..14d948c59 --- /dev/null +++ b/app/iscsi_tgt/.gitignore @@ -0,0 +1 @@ +iscsi_tgt diff --git a/app/iscsi_tgt/Makefile b/app/iscsi_tgt/Makefile new file mode 100644 index 000000000..876819265 --- /dev/null +++ b/app/iscsi_tgt/Makefile @@ -0,0 +1,79 @@ +# +# BSD LICENSE +# +# Copyright (c) Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) +include $(SPDK_ROOT_DIR)/mk/spdk.common.mk +include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk + +APP = iscsi_tgt + +CFLAGS += $(DPDK_INC) + +# Add iSCSI library directory to include path +# TODO: remove this once iSCSI has a public API header +CFLAGS += -I$(SPDK_ROOT_DIR)/lib + +C_SRCS := iscsi_tgt.c + +SPDK_LIBS = \ + $(SPDK_ROOT_DIR)/lib/json/libspdk_json.a \ + $(SPDK_ROOT_DIR)/lib/jsonrpc/libspdk_jsonrpc.a \ + $(SPDK_ROOT_DIR)/lib/rpc/libspdk_rpc.a \ + $(SPDK_ROOT_DIR)/lib/bdev/libspdk_bdev.a \ + $(SPDK_ROOT_DIR)/lib/iscsi/libspdk_iscsi.a \ + $(SPDK_ROOT_DIR)/lib/scsi/libspdk_scsi.a \ + $(SPDK_ROOT_DIR)/lib/net/libspdk_net.a \ + $(SPDK_ROOT_DIR)/lib/copy/libspdk_copy.a \ + $(SPDK_ROOT_DIR)/lib/trace/libspdk_trace.a \ + $(SPDK_ROOT_DIR)/lib/conf/libspdk_conf.a \ + $(SPDK_ROOT_DIR)/lib/util/libspdk_util.a \ + $(SPDK_ROOT_DIR)/lib/memory/libspdk_memory.a \ + $(SPDK_ROOT_DIR)/lib/log/libspdk_log.a \ + $(SPDK_ROOT_DIR)/lib/log/rpc/libspdk_log_rpc.a \ + $(SPDK_ROOT_DIR)/lib/event/libspdk_event.a \ + $(SPDK_ROOT_DIR)/lib/event/rpc/libspdk_app_rpc.a \ + +LIBS += -Wl,--whole-archive $(SPDK_LIBS) -Wl,--no-whole-archive +LIBS += -lcrypto $(PCIACCESS_LIB) $(DPDK_LIB) +LIBS += $(BLOCKDEV_MODULES_LINKER_ARGS) \ + $(COPY_MODULES_LINKER_ARGS) + +all : $(APP) + +$(APP) : $(OBJS) $(SPDK_LIBS) + $(LINK_C) + +clean : + $(CLEAN_C) $(APP) + +include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk diff --git a/app/iscsi_tgt/iscsi_tgt.c b/app/iscsi_tgt/iscsi_tgt.c new file mode 100644 index 000000000..cfcbdf9f0 --- /dev/null +++ b/app/iscsi_tgt/iscsi_tgt.c @@ -0,0 +1,243 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "spdk/event.h" +#include "spdk/net.h" +#include "iscsi/iscsi.h" +#include "spdk/scsi.h" +#include "spdk/log.h" +#include "spdk/bdev.h" +#include "spdk/copy_engine.h" + +uint64_t g_flush_timeout; + +static void +spdk_iscsi_dump_memory_info(void) +{ + struct rte_malloc_socket_stats stats; + int i; + + for (i = 0; i < RTE_MAX_NUMA_NODES; i++) { + rte_malloc_get_socket_stats(i, &stats); + if (stats.heap_totalsz_bytes > 0) + fprintf(stderr, "Socket %d: Total memory %"PRIu64" MB," + " Free memory %"PRIu64" MB\n", + i, stats.heap_totalsz_bytes >> 20, + stats.heap_freesz_bytes >> 20); + } +} + +static void +spdk_sigusr1(int signo __attribute__((__unused__))) +{ + char *config_str = NULL; + if (spdk_app_get_running_config(&config_str, "iscsi.conf") < 0) + fprintf(stderr, "Error getting config\n"); + else { + fprintf(stdout, "============================\n"); + fprintf(stdout, " iSCSI target running config\n"); + fprintf(stdout, "=============================\n"); + fprintf(stdout, "%s", config_str); + } + free(config_str); +} + +static void +usage(char *executable_name) +{ + printf("%s [options]\n", executable_name); + printf("options:\n"); + printf(" -c config config file (default %s)\n", SPDK_ISCSI_DEFAULT_CONFIG); + printf(" -e mask tracepoint group mask for spdk trace buffers (default 0x0)\n"); + printf(" -m mask core mask for DPDK\n"); + printf(" -i instance ID\n"); + printf(" -l facility use specific syslog facility (default %s)\n", + SPDK_APP_DEFAULT_LOG_FACILITY); + printf(" -n channel number of memory channels used for DPDK\n"); + printf(" -p core master (primary) core for DPDK\n"); + printf(" -s size memory size in MB for DPDK\n"); +#ifdef DEBUG + printf(" -t flag trace flag (all, net, iscsi, scsi, target, debug)\n"); +#else + printf(" -t flag trace flag (not supported - must rebuild with CONFIG_DEBUG=y)\n"); +#endif + printf(" -v verbose (enable warnings)\n"); + printf(" -H show this usage\n"); + printf(" -V show version\n"); + printf(" -d disable coredump file enabling\n"); +} + +/*! \file + +This is the main file. + +*/ + +/*! + +\brief This is the main function for the iSCSI server application. + +\msc + + c_runtime [label="C Runtime"],libuns,dpdk [label="DPDK"], iSCSI [label="iSCSI Server"]; + c_runtime=>libuns [label="__msa_init()"]; + libuns=>dpdk [label="rte_eal_init()"]; + libuns<iSCSI [label="main()"]; + iSCSI=>iSCSI [label="spdk_dpdk_framework_init()"]; + iSCSI=>iSCSI [label="spdk_app_init()"]; + iSCSI=>iSCSI [label="spdk_event_allocate()"]; + iSCSI=>iSCSI [label="spdk_app_start()"]; + iSCSI=>iSCSI [label="spdk_app_fini()"]; + c_runtime< " + print "advanced usage:" + print "If you want to run fio with verify, please add verify string after runtime." + print "Currently fio.py only support write rw randwrite randrw with verify enabled." 
+ sys.exit(1) + + io_size = int(sys.argv[1]) + queue_depth = int(sys.argv[2]) + test_type = sys.argv[3] + runtime = sys.argv[4] + if len(sys.argv) > 5: + verify = True + else: + verify = False + + devices = get_target_devices() + print "Found devices: ", devices + + configure_devices(devices) + fio_executable = '/usr/bin/fio' + + device_paths = ['/dev/' + dev for dev in devices] + sys.stdout.flush() + signal.signal(signal.SIGTERM, interrupt_handler) + signal.signal(signal.SIGINT, interrupt_handler) + fio = Popen([fio_executable, '-'], stdin=PIPE) + fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify)) + fio.stdin.close() + rc = fio.wait() + print "FIO completed with code %d\n" % rc + sys.stdout.flush() + sys.exit(rc) + +def get_target_devices(): + output = check_output('iscsiadm -m session -P 3', shell=True) + return re.findall("Attached scsi disk (sd[a-z]+)", output) + +def create_fio_config(size, q_depth, devices, test, run_time, verify): + if not verify: + verifyfio = "" + else: + verifyfio = verify_template + fiofile = fio_template % {"blocksize": size, "iodepth": q_depth, + "testtype": test, "runtime": run_time, "verify": verifyfio} + for (i, dev) in enumerate(devices): + fiofile += fio_job_template % {"jobnumber": i, "device": dev} + return fiofile + +def set_device_parameter(devices, filename_template, value): + for dev in devices: + filename = filename_template % dev + f = open(filename, 'r+b') + f.write(value) + f.close() + +def configure_devices(devices): + set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2") + set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128") + requested_qd = 128 + qd = requested_qd + while qd > 0: + try: + set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd)) + break + except IOError: + qd = qd - 1 + if qd == 0: + print "Could not set block device queue depths." 
+ else: + print "Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd)) + set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop") + +if __name__ == "__main__": + main() diff --git a/scripts/rpc.py b/scripts/rpc.py new file mode 100755 index 000000000..5c63e3113 --- /dev/null +++ b/scripts/rpc.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python + +import argparse +import json +import socket + +SPDK_JSONRPC_PORT_BASE = 5260 + +def print_dict(d): + print json.dumps(d, indent=2) + +parser = argparse.ArgumentParser(description='SPDK RPC command line interface') +parser.add_argument('-s', dest='server_ip', help='RPC server IP address', default='127.0.0.1') +parser.add_argument('-p', dest='instance_id', help='RPC server instance ID', default=0, type=int) +subparsers = parser.add_subparsers(help='RPC methods') + + +def int_arg(arg): + return int(arg, 0) + + +def jsonrpc_call(method, params={}): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((args.server_ip, SPDK_JSONRPC_PORT_BASE + args.instance_id)) + req = {} + req['jsonrpc'] = '2.0' + req['method'] = method + req['id'] = 1 + if (params): + req['params'] = params + reqstr = json.dumps(req) + s.sendall(reqstr) + buf = '' + closed = False + response = {} + while not closed: + newdata = s.recv(4096) + if (newdata == b''): + closed = True + buf += newdata + try: + response = json.loads(buf) + except ValueError: + continue # incomplete response; keep buffering + break + s.close() + + if not response: + if method == "kill_instance": + exit(0) + print "Connection closed with partial response:" + print buf + exit(1) + + if 'error' in response: + print "Got JSON-RPC error response" + print "request:" + print_dict(json.loads(reqstr)) + print "response:" + print_dict(response['error']) + exit(1) + + return response['result'] + +def get_luns(args): + print_dict(jsonrpc_call('get_luns')) + +p = subparsers.add_parser('get_luns', help='Display active LUNs') +p.set_defaults(func=get_luns) + + +def get_portal_groups(args): + print_dict(jsonrpc_call('get_portal_groups')) + +p = subparsers.add_parser('get_portal_groups', help='Display current portal group configuration') +p.set_defaults(func=get_portal_groups) + + +def get_initiator_groups(args): + print_dict(jsonrpc_call('get_initiator_groups')) + +p = subparsers.add_parser('get_initiator_groups', help='Display current initiator group configuration') +p.set_defaults(func=get_initiator_groups) + + +def get_target_nodes(args): + print_dict(jsonrpc_call('get_target_nodes')) + +p = subparsers.add_parser('get_target_nodes', help='Display target nodes') +p.set_defaults(func=get_target_nodes) + + +def construct_target_node(args): + lun_name_id_dict = dict(u.split(":") + for u in args.lun_name_id_pairs.split(" ")) + lun_names = lun_name_id_dict.keys() + lun_ids = list(map(int, lun_name_id_dict.values())) + + pg_tags = [] + ig_tags = [] + for u in args.pg_ig_mappings.split(" "): + pg, ig = u.split(":") + pg_tags.append(int(pg)) + ig_tags.append(int(ig)) + + params = { + 'name': args.name, + 'alias_name': args.alias_name, + 'pg_tags': pg_tags, + 'ig_tags': ig_tags, + 'lun_names': lun_names, + 'lun_ids': lun_ids, + 'queue_depth': args.queue_depth, + 'chap_disabled': args.chap_disabled, + 'chap_required': args.chap_required, + 'chap_mutual': args.chap_mutual, + 'chap_auth_group': args.chap_auth_group, + } + jsonrpc_call('construct_target_node', params) + +p = subparsers.add_parser('construct_target_node', help='Add a target node') +p.add_argument('name', 
help='Target node name (ASCII)') +p.add_argument('alias_name', help='Target node alias name (ASCII)') +p.add_argument('lun_name_id_pairs', help="""Whitespace-separated list of LUN pairs enclosed +in quotes. Format: 'lun_name0:id0 lun_name1:id1' etc +Example: 'Malloc0:0 Malloc1:1 Malloc5:2' +*** The LUNs must pre-exist *** +*** LUN0 (id = 0) is required *** +*** LUN names cannot contain space or colon characters ***""") +p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings +Whitespace separated, quoted, mapping defined with colon +separated list of "tags" (int > 0) +Example: '1:1 2:2 2:1' +*** The Portal/Initiator Groups must be precreated ***""") +p.add_argument('queue_depth', help='Desired target queue depth', type=int) +p.add_argument('chap_disabled', help="""CHAP authentication should be disabled for this target node. +*** Mutually exclusive with chap_required ***""", type=int) +p.add_argument('chap_required', help="""CHAP authentication should be required for this target node. +*** Mutually exclusive with chap_disabled ***""", type=int) +p.add_argument('chap_mutual', help='CHAP authentication should be mutual/bidirectional.', type=int) +p.add_argument('chap_auth_group', help="""Authentication group ID for this target node. +*** Authentication group must be precreated ***""", type=int) +p.set_defaults(func=construct_target_node) + + +def construct_malloc_lun(args): + num_blocks = (args.total_size * 1024 * 1024) / args.block_size + params = {'num_blocks': num_blocks, 'block_size': args.block_size} + jsonrpc_call('construct_malloc_lun', params) + +p = subparsers.add_parser('construct_malloc_lun', help='Add a LUN with malloc backend') +p.add_argument('total_size', help='Size of malloc LUN in MB (int > 0)', type=int) +p.add_argument('block_size', help='Block size for this LUN', type=int) +p.set_defaults(func=construct_malloc_lun) + + +def construct_aio_lun(args): + params = {'fname': args.fname} + jsonrpc_call('construct_aio_lun', params) + +p = subparsers.add_parser('construct_aio_lun', help='Add a LUN with aio backend') +p.add_argument('fname', help='Path to device or file (ex: /dev/sda)') +p.set_defaults(func=construct_aio_lun) + + +def set_trace_flag(args): + params = {'flag': args.flag} + jsonrpc_call('set_trace_flag', params) + +p = subparsers.add_parser('set_trace_flag', help='set trace flag') +p.add_argument('flag', help='trace mask we want to set. (for example "debug").') +p.set_defaults(func=set_trace_flag) + + +def clear_trace_flag(args): + params = {'flag': args.flag} + jsonrpc_call('clear_trace_flag', params) + +p = subparsers.add_parser('clear_trace_flag', help='clear trace flag') +p.add_argument('flag', help='trace mask we want to clear. 
(for example "debug").') +p.set_defaults(func=clear_trace_flag) + + +def get_trace_flags(args): + print_dict(jsonrpc_call('get_trace_flags')) + +p = subparsers.add_parser('get_trace_flags', help='get trace flags') +p.set_defaults(func=get_trace_flags) + + +def add_portal_group(args): + # parse out portal list host1:port1 host2:port2 + portals = [] + for p in args.portal_list: + host_port = p.split(':') + portals.append({'host': host_port[0], 'port': host_port[1]}) + + params = {'tag': args.tag, 'portals': portals} + jsonrpc_call('add_portal_group', params) + +p = subparsers.add_parser('add_portal_group', help='Add a portal group') +p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int) +p.add_argument('portal_list', nargs=argparse.REMAINDER, help="""List of portals in 'host:port' format, separated by whitespace +Example: '192.168.100.100:3260' '192.168.100.100:3261'""") +p.set_defaults(func=add_portal_group) + + +def add_initiator_group(args): + initiators = [] + netmasks = [] + for i in args.initiator_list.split(' '): + initiators.append(i) + for n in args.netmask_list.split(' '): + netmasks.append(n) + + params = {'tag': args.tag, 'initiators': initiators, 'netmasks': netmasks} + jsonrpc_call('add_initiator_group', params) + + +p = subparsers.add_parser('add_initiator_group', help='Add an initiator group') +p.add_argument('tag', help='Initiator group tag (unique, integer > 0)', type=int) +p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses, +enclosed in quotes. Example: 'ALL' or '127.0.0.1 192.168.200.100'""") +p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes. +Example: '255.255.0.0 255.248.0.0' etc""") +p.set_defaults(func=add_initiator_group) + + +def delete_target_node(args): + params = {'name': args.target_node_name} + jsonrpc_call('delete_target_node', params) + +p = subparsers.add_parser('delete_target_node', help='Delete a target node') +p.add_argument('target_node_name', help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.') +p.set_defaults(func=delete_target_node) + + +def delete_portal_group(args): + params = {'tag': args.tag} + jsonrpc_call('delete_portal_group', params) + +p = subparsers.add_parser('delete_portal_group', help='Delete a portal group') +p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int) +p.set_defaults(func=delete_portal_group) + + +def delete_initiator_group(args): + params = {'tag': args.tag} + jsonrpc_call('delete_initiator_group', params) + +p = subparsers.add_parser('delete_initiator_group', help='Delete an initiator group') +p.add_argument('tag', help='Initiator group tag (unique, integer > 0)', type=int) +p.set_defaults(func=delete_initiator_group) + + +def delete_lun(args): + params = {'name': args.lun_name} + jsonrpc_call('delete_lun', params) + +p = subparsers.add_parser('delete_lun', help='Delete a LUN') +p.add_argument('lun_name', help='LUN name to be deleted. 
Example: Malloc0.') +p.set_defaults(func=delete_lun) + + +def get_iscsi_connections(args): + print_dict(jsonrpc_call('get_iscsi_connections')) + +p = subparsers.add_parser('get_iscsi_connections', help='Display iSCSI connections') +p.set_defaults(func=get_iscsi_connections) + + +def get_scsi_devices(args): + print_dict(jsonrpc_call('get_scsi_devices')) + +p = subparsers.add_parser('get_scsi_devices', help='Display SCSI devices') +p.set_defaults(func=get_scsi_devices) + + +def add_ip_address(args): + params = {'ifc_index': args.ifc_index, 'ip_address': args.ip_addr} + jsonrpc_call('add_ip_address', params) + +p = subparsers.add_parser('add_ip_address', help='Add IP address') +p.add_argument('ifc_index', help='ifc index of the nic device.', type=int) +p.add_argument('ip_addr', help='ip address will be added.') +p.set_defaults(func=add_ip_address) + + +def delete_ip_address(args): + params = {'ifc_index': args.ifc_index, 'ip_address': args.ip_addr} + jsonrpc_call('delete_ip_address', params) + +p = subparsers.add_parser('delete_ip_address', help='Delete IP address') +p.add_argument('ifc_index', help='ifc index of the nic device.', type=int) +p.add_argument('ip_addr', help='ip address will be deleted.') +p.set_defaults(func=delete_ip_address) + + +def get_interfaces(args): + print_dict(jsonrpc_call('get_interfaces')) + +p = subparsers.add_parser('get_interfaces', help='Display current interface list') +p.set_defaults(func=get_interfaces) + + +def kill_instance(args): + params = {'sig_name': args.sig_name} + jsonrpc_call('kill_instance', params) + +p = subparsers.add_parser('kill_instance', help='Send signal to instance') +p.add_argument('sig_name', help='signal will be sent to server.') +p.set_defaults(func=kill_instance) + + +args = parser.parse_args() +args.func(args) diff --git a/test/iscsi_tgt/filesystem/filesystem.sh b/test/iscsi_tgt/filesystem/filesystem.sh new file mode 100755 index 000000000..c99abf8f3 --- /dev/null +++ b/test/iscsi_tgt/filesystem/filesystem.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$testdir/../../.. +source $rootdir/scripts/autotest_common.sh + +if [ -z "$TARGET_IP" ]; then + echo "TARGET_IP not defined in environment" + exit 1 +fi + +if [ -z "$INITIATOR_IP" ]; then + echo "INITIATOR_IP not defined in environment" + exit 1 +fi + +timing_enter filesystem + +# iSCSI target configuration +PORT=3260 +RPC_PORT=5260 +INITIATOR_TAG=2 +INITIATOR_NAME=ALL +NETMASK=$INITIATOR_IP/32 +MALLOC_LUN_SIZE=256 +MALLOC_BLOCK_SIZE=512 + +rpc_py="python $rootdir/scripts/rpc.py" + +./app/iscsi_tgt/iscsi_tgt -c $testdir/iscsi.conf & +pid=$! +echo "Process pid: $pid" + +trap "process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +waitforlisten $pid ${RPC_PORT} +echo "iscsi_tgt is listening. Running tests..." 
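+
+# Configure the running target over JSON-RPC: portal group 1 on
+# $TARGET_IP:$PORT, initiator group $INITIATOR_TAG restricted to $NETMASK,
+# and a malloc blockdev to back LUN0 of the target node created below.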
+ +$rpc_py add_portal_group 1 $TARGET_IP:$PORT +$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK +$rpc_py construct_malloc_lun $MALLOC_LUN_SIZE $MALLOC_BLOCK_SIZE +# "Malloc0:0" ==> use Malloc0 blockdev for LUN0 +# "1:2" ==> map PortalGroup1 to InitiatorGroup2 +# "64" ==> iSCSI queue depth 64 +# "1 0 0 0" ==> disable CHAP authentication +$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' '1:2' 256 1 0 0 0 +sleep 1 + +iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$PORT +iscsiadm -m node --login -p $TARGET_IP:$PORT + +trap "umount /mnt/device; rm -rf /mnt/device; iscsicleanup; process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +sleep 1 + +mkdir -p /mnt/device + +dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}') + +parted -s /dev/$dev mklabel msdos +parted -s /dev/$dev mkpart primary '0%' '100%' +sleep 1 + +for fstype in "ext4" "btrfs" "xfs"; do + + if [ "$fstype" == "ext4" ]; then + mkfs.${fstype} -F /dev/${dev}1 + else + mkfs.${fstype} -f /dev/${dev}1 + fi + mount /dev/${dev}1 /mnt/device + touch /mnt/device/aaa + umount /mnt/device + + iscsiadm -m node --logout + sleep 1 + iscsiadm -m node --login -p $TARGET_IP:$PORT + sleep 1 + dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}') + mount -o rw /dev/${dev}1 /mnt/device + + if [ -f "/mnt/device/aaa" ]; then + echo "File existed." + else + echo "File doesn't exist." + exit 1 + fi + + rm -rf /mnt/device/aaa + umount /mnt/device +done + +rm -rf /mnt/device + +trap - SIGINT SIGTERM EXIT + +iscsicleanup +killprocess $pid +timing_exit filesystem diff --git a/test/iscsi_tgt/filesystem/iscsi.conf b/test/iscsi_tgt/filesystem/iscsi.conf new file mode 100644 index 000000000..0c90807d7 --- /dev/null +++ b/test/iscsi_tgt/filesystem/iscsi.conf @@ -0,0 +1,15 @@ +[Global] + ReactorMask 0xFFFF + LogFacility "local7" + +[iSCSI] + NodeBase "iqn.2016-06.io.spdk" + AuthFile /usr/local/etc/spdk/auth.conf + Timeout 30 + DiscoveryAuthMethod Auto + MaxSessions 16 + ImmediateData Yes + ErrorRecoveryLevel 0 + +[Rpc] + Enable Yes diff --git a/test/iscsi_tgt/fio/fio.sh b/test/iscsi_tgt/fio/fio.sh new file mode 100755 index 000000000..fe5842d81 --- /dev/null +++ b/test/iscsi_tgt/fio/fio.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$testdir/../../.. +source $rootdir/scripts/autotest_common.sh + +function running_config() { + # generate a config file from the running iscsi_tgt + # running_config.sh will leave the file at /tmp/iscsi.conf + $testdir/running_config.sh + sleep 1 + + # now start iscsi_tgt again using the generated config file + # keep the same iscsiadm configuration to confirm that the + # config file matched the running configuration + killprocess $pid + trap "iscsicleanup; exit 1" SIGINT SIGTERM EXIT + ./app/iscsi_tgt/iscsi_tgt -c /tmp/iscsi.conf & + pid=$! + echo "Process pid: $pid" + trap "iscsicleanup; process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + waitforlisten $pid ${RPC_PORT} + echo "iscsi_tgt is listening. Running tests..." 
+ + sleep 1 + $fio_py 4096 1 randrw 5 +} + +if [ -z "$TARGET_IP" ]; then + echo "TARGET_IP not defined in environment" + exit 1 +fi + +if [ -z "$INITIATOR_IP" ]; then + echo "INITIATOR_IP not defined in environment" + exit 1 +fi + +timing_enter fio + +# iSCSI target configuration +PORT=3260 +RPC_PORT=5260 +INITIATOR_TAG=2 +INITIATOR_NAME=ALL +NETMASK=$INITIATOR_IP/32 +MALLOC_LUN_SIZE=64 +MALLOC_BLOCK_SIZE=4096 + +rpc_py="python $rootdir/scripts/rpc.py" +fio_py="python $rootdir/scripts/fio.py" + +./app/iscsi_tgt/iscsi_tgt -c $testdir/iscsi.conf & +pid=$! +echo "Process pid: $pid" + +trap "process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +waitforlisten $pid ${RPC_PORT} +echo "iscsi_tgt is listening. Running tests..." + +$rpc_py add_portal_group 1 $TARGET_IP:$PORT +$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK +$rpc_py construct_malloc_lun $MALLOC_LUN_SIZE $MALLOC_BLOCK_SIZE +# "Malloc0:0" ==> use Malloc0 blockdev for LUN0 +# "1:2" ==> map PortalGroup1 to InitiatorGroup2 +# "64" ==> iSCSI queue depth 64 +# "1 0 0 0" ==> disable CHAP authentication +$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' '1:2' 64 1 0 0 0 +sleep 1 + +iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$PORT +iscsiadm -m node --login -p $TARGET_IP:$PORT + +trap "iscsicleanup; process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +sleep 1 +$fio_py 4096 1 randrw 5 verify +$fio_py 131072 32 randrw 5 verify + +if [ $RUN_NIGHTLY -eq 1 ]; then + VERIFY_TIME=300 +else + VERIFY_TIME=10 +fi +$fio_py 4096 1 write $VERIFY_TIME verify + +# Run the running_config test which will generate a config file from the +# running iSCSI target, then kill and restart the iSCSI target using the +# generated config file +if [ $RUN_NIGHTLY -eq 1 ]; then + running_config +fi + +if [ -f "./local-job0-0-verify.state" ]; then + mv ./local-job0-0-verify.state $output_dir +fi + +trap - SIGINT SIGTERM EXIT + +iscsicleanup +killprocess $pid +timing_exit fio diff --git a/test/iscsi_tgt/fio/iscsi.conf b/test/iscsi_tgt/fio/iscsi.conf new file mode 100644 index 000000000..723adf940 --- /dev/null +++ b/test/iscsi_tgt/fio/iscsi.conf @@ -0,0 +1,18 @@ +[Global] + LogFacility "local7" + +[iSCSI] + NodeBase "iqn.2016-06.io.spdk" + AuthFile /usr/local/etc/spdk/auth.conf + Timeout 30 + DiscoveryAuthMethod Auto + MaxSessions 16 + ImmediateData Yes + ErrorRecoveryLevel 0 + +[Rpc] + Enable Yes + +[Nvme] + NvmeLunsPerNs 1 + UnbindFromKernel Yes diff --git a/test/iscsi_tgt/fio/running_config.sh b/test/iscsi_tgt/fio/running_config.sh new file mode 100755 index 000000000..796132408 --- /dev/null +++ b/test/iscsi_tgt/fio/running_config.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -xe + +if [ $EUID -ne 0 ]; then + echo "$0 must be run as root" + exit 1 +fi + +if [ ! -f /var/run/iscsi.pid.0 ]; then + echo "ids is not running" + exit 1 +fi + +# delete any existing temporary iscsi.conf files +rm -f /tmp/iscsi.conf.* + +kill -USR1 `cat /var/run/iscsi.pid.0` + +if [ ! 
-f `ls /tmp/iscsi.conf.*` ]; then + echo "ids did not generate config file" + exit 1 +fi + +mv `ls /tmp/iscsi.conf.*` /tmp/iscsi.conf diff --git a/test/iscsi_tgt/reset/iscsi.conf b/test/iscsi_tgt/reset/iscsi.conf new file mode 100644 index 000000000..bcc80a845 --- /dev/null +++ b/test/iscsi_tgt/reset/iscsi.conf @@ -0,0 +1,14 @@ +[Global] + LogFacility "local7" + +[iSCSI] + NodeBase "iqn.2016-06.io.spdk" + AuthFile /usr/local/etc/spdk/auth.conf + Timeout 30 + DiscoveryAuthMethod Auto + MaxSessions 16 + ImmediateData Yes + ErrorRecoveryLevel 0 + +[Rpc] + Enable Yes diff --git a/test/iscsi_tgt/reset/reset.sh b/test/iscsi_tgt/reset/reset.sh new file mode 100755 index 000000000..3155bf2c0 --- /dev/null +++ b/test/iscsi_tgt/reset/reset.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +set -xe + +testdir=$(readlink -f $(dirname $0)) +rootdir=$testdir/../../.. +source $rootdir/scripts/autotest_common.sh + +if [ -z "$TARGET_IP" ]; then + echo "TARGET_IP not defined in environment" + exit 1 +fi + +if [ -z "$INITIATOR_IP" ]; then + echo "INITIATOR_IP not defined in environment" + exit 1 +fi + +timing_enter reset + +# iSCSI target configuration +PORT=3260 +RPC_PORT=5260 +INITIATOR_TAG=2 +INITIATOR_NAME=ALL +NETMASK=$INITIATOR_IP/32 +MALLOC_LUN_SIZE=64 +MALLOC_BLOCK_SIZE=512 + +rpc_py="python $rootdir/scripts/rpc.py" +fio_py="python $rootdir/scripts/fio.py" + +if ! hash sg_reset; then + exit 1 +fi + +./app/iscsi_tgt/iscsi_tgt -c $testdir/iscsi.conf & +pid=$! +echo "Process pid: $pid" + +trap "process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +waitforlisten $pid ${RPC_PORT} +echo "iscsi_tgt is listening. Running tests..." + +$rpc_py add_portal_group 1 $TARGET_IP:$PORT +$rpc_py add_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK +$rpc_py construct_malloc_lun $MALLOC_LUN_SIZE $MALLOC_BLOCK_SIZE +# "Malloc0:0" ==> use Malloc0 blockdev for LUN0 +# "1:2" ==> map PortalGroup1 to InitiatorGroup2 +# "64" ==> iSCSI queue depth 64 +# "1 0 0 0" ==> disable CHAP authentication +$rpc_py construct_target_node Target3 Target3_alias 'Malloc0:0' '1:2' 64 1 0 0 0 +sleep 1 + +iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$PORT +iscsiadm -m node --login -p $TARGET_IP:$PORT +dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}') + +sleep 1 +$fio_py 512 1 read 60 & +fiopid=$! 
+echo "FIO pid: $fiopid" + +trap "iscsicleanup; process_core; killprocess $pid; killprocess $fiopid; exit 1" SIGINT SIGTERM EXIT + +# Do 3 resets while making sure iscsi_tgt and fio are still running +for i in 1 2 3; do + sleep 1 + kill -s 0 $pid + kill -s 0 $fiopid + sg_reset -d /dev/$dev + sleep 1 + kill -s 0 $pid + kill -s 0 $fiopid +done + +kill $fiopid +wait $fiopid || true + +trap - SIGINT SIGTERM EXIT + +iscsicleanup +killprocess $pid +timing_exit reset diff --git a/test/iscsi_tgt/rpc_config/iscsi.conf b/test/iscsi_tgt/rpc_config/iscsi.conf new file mode 100644 index 000000000..bcc80a845 --- /dev/null +++ b/test/iscsi_tgt/rpc_config/iscsi.conf @@ -0,0 +1,14 @@ +[Global] + LogFacility "local7" + +[iSCSI] + NodeBase "iqn.2016-06.io.spdk" + AuthFile /usr/local/etc/spdk/auth.conf + Timeout 30 + DiscoveryAuthMethod Auto + MaxSessions 16 + ImmediateData Yes + ErrorRecoveryLevel 0 + +[Rpc] + Enable Yes diff --git a/test/iscsi_tgt/rpc_config/rpc_config.py b/test/iscsi_tgt/rpc_config/rpc_config.py new file mode 100755 index 000000000..79f0fd1e2 --- /dev/null +++ b/test/iscsi_tgt/rpc_config/rpc_config.py @@ -0,0 +1,416 @@ +#!/usr/bin/env python + + +import os +import os.path +import re +import sys +import time +import json +import random +from subprocess import check_call, call, check_output, Popen, PIPE, CalledProcessError + +netmask = ('127.0.0.1', '127.0.0.0') +rpc_param = { + 'target_ip': '127.0.0.1', + 'port': 3260, + 'initiator_name': 'ALL', + 'netmask': netmask, + 'lun_total': 3, + 'malloc_lun_size': 64, + 'malloc_block_size': 512, + 'queue_depth': 64, + 'target_name': 'Target3', + 'alias_name': 'Target3_alias', + 'chap_disable': 1, + 'chap_mutal': 0, + 'chap_required': 0, + 'chap_auth_group': 0, + 'trace_flag': 'rpc' +} + + +class RpcException(Exception): + + def __init__(self, retval, *args): + super(RpcException, self).__init__(*args) + self.retval = retval + + +class spdk_rpc(object): + + def __init__(self, rpc_py): + self.rpc_py = rpc_py + + def __getattr__(self, name): + def call(*args): + cmd = "python {} {}".format(self.rpc_py, name) + for arg in args: + cmd += " {}".format(arg) + return check_output(cmd, shell=True) + return call + + +def verify(expr, retcode, msg): + if not expr: + raise RpcException(retcode, msg) + + +def verify_trace_flag_rpc_methods(rpc_py, rpc_param): + rpc = spdk_rpc(rpc_py) + output = rpc.get_trace_flags() + jsonvalue = json.loads(output) + verify(not jsonvalue[rpc_param['trace_flag']], 1, + "get_trace_flags returned {}, expected false".format(jsonvalue)) + rpc.set_trace_flag(rpc_param['trace_flag']) + output = rpc.get_trace_flags() + jsonvalue = json.loads(output) + verify(jsonvalue[rpc_param['trace_flag']], 1, + "get_trace_flags returned {}, expected true".format(jsonvalue)) + rpc.clear_trace_flag(rpc_param['trace_flag']) + output = rpc.get_trace_flags() + jsonvalue = json.loads(output) + verify(not jsonvalue[rpc_param['trace_flag']], 1, + "get_trace_flags returned {}, expected false".format(jsonvalue)) + + print "verify_trace_flag_rpc_methods passed" + +def verify_iscsi_connection_rpc_methods(rpc_py): + rpc = spdk_rpc(rpc_py) + output = rpc.get_iscsi_connections() + jsonvalue = json.loads(output) + verify(not jsonvalue, 1, + "get_iscsi_connections returned {}, expected empty".format(jsonvalue)) + + portal_tag = '1' + initiator_tag = '1' + rpc.construct_malloc_lun(rpc_param['malloc_lun_size'], rpc_param['malloc_block_size']) + rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port']))) + 
rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0]) + + lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0" + net_mapping = portal_tag + ":" + initiator_tag + rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], + rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group']) + check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True) + check_output('iscsiadm -m node --login', shell=True) + name = json.loads(rpc.get_target_nodes())[0]['name'] + output = rpc.get_iscsi_connections() + jsonvalues = json.loads(output) + verify(jsonvalues[0]['target_node_name'] == rpc_param['target_name'], 1, + "target node name vaule is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name'])) + verify(jsonvalues[0]['id'] == 0, 1, + "device id value is {}, expected 0".format(jsonvalues[0]['id'])) + verify(jsonvalues[0]['initiator_addr'] == rpc_param['target_ip'], 1, + "initiator address values is {}, expected {}".format(jsonvalues[0]['initiator_addr'], rpc_param['target_ip'])) + verify(jsonvalues[0]['target_addr'] == rpc_param['target_ip'], 1, + "target address values is {}, expected {}".format(jsonvalues[0]['target_addr'], rpc_param['target_ip'])) + + check_output('iscsiadm -m node --logout', shell=True) + check_output('iscsiadm -m node -o delete', shell=True) + rpc.delete_initiator_group(initiator_tag) + rpc.delete_portal_group(portal_tag) + rpc.delete_target_node(name) + output = rpc.get_iscsi_connections() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_iscsi_connections returned {}, expected empty".format(jsonvalues)) + + print "verify_iscsi_connection_rpc_methods passed" + +def verify_scsi_devices_rpc_methods(rpc_py): + rpc = spdk_rpc(rpc_py) + output = rpc.get_scsi_devices() + jsonvalue = json.loads(output) + verify(not jsonvalue, 1, + "get_scsi_devices returned {}, expected empty".format(jsonvalue)) + + portal_tag = '1' + initiator_tag = '1' + rpc.construct_malloc_lun(rpc_param['malloc_lun_size'], rpc_param['malloc_block_size']) + rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port']))) + rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0]) + + lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0" + net_mapping = portal_tag + ":" + initiator_tag + rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], + rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group']) + check_output('iscsiadm -m discovery -t st -p {}'.format(rpc_param['target_ip']), shell=True) + check_output('iscsiadm -m node --login', shell=True) + name = json.loads(rpc.get_target_nodes())[0]['name'] + output = rpc.get_scsi_devices() + jsonvalues = json.loads(output) + verify(jsonvalues[0]['device_name'] == rpc_param['target_name'], 1, + "device name vaule is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name'])) + verify(jsonvalues[0]['id'] == 0, 1, + "device id value is {}, expected 0".format(jsonvalues[0]['id'])) + + check_output('iscsiadm -m node --logout', shell=True) + check_output('iscsiadm -m node -o delete', shell=True) + rpc.delete_initiator_group(initiator_tag) + rpc.delete_portal_group(portal_tag) + rpc.delete_target_node(name) + output = 
rpc.get_scsi_devices() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_scsi_devices returned {}, expected empty".format(jsonvalues)) + + print "verify_scsi_devices_rpc_methods passed" + + +def verify_luns_rpc_methods(rpc_py, rpc_param): + rpc = spdk_rpc(rpc_py) + output = rpc.get_luns() + jsonvalue = json.loads(output) + verify(not jsonvalue, 1, + "get_luns returned {}, expected empty".format(jsonvalue)) + + for i in range(1, rpc_param['lun_total'] + 1): + rpc.construct_malloc_lun(rpc_param['malloc_lun_size'], rpc_param['malloc_block_size']) + output = rpc.get_luns() + jsonvalue = json.loads(output) + verify(not jsonvalue, 1, + "get_luns returned {}, expected empty".format(jsonvalue)) + + print "verify_luns_rpc_methods passed" + + +def verify_portal_groups_rpc_methods(rpc_py, rpc_param): + rpc = spdk_rpc(rpc_py) + output = rpc.get_portal_groups() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_portal_groups returned {} groups, expected empty".format(jsonvalues)) + + lo_ip = ('127.0.0.1', '127.0.0.6') + nics = json.loads(rpc.get_interfaces()) + for x in nics: + if x["ifc_index"] == 'lo': + rpc.add_ip_address(x["ifc_index"], lo_ip[1]) + for idx, value in enumerate(lo_ip): + # The portal group tag must start at 1 + tag = idx + 1 + rpc.add_portal_group(tag, "{}:{}".format(value, rpc_param['port'])) + output = rpc.get_portal_groups() + jsonvalues = json.loads(output) + verify(len(jsonvalues) == tag, 1, + "get_portal_groups returned {} groups, expected {}".format(len(jsonvalues), tag)) + + tag_list = [] + for idx, value in enumerate(jsonvalues): + verify(value['portals'][0]['host'] == lo_ip[idx], 1, + "host value is {}, expected {}".format(value['portals'][0]['host'], rpc_param['target_ip'])) + verify(value['portals'][0]['port'] == str(rpc_param['port']), 1, + "port value is {}, expected {}".format(value['portals'][0]['port'], str(rpc_param['port']))) + tag_list.append(value['tag']) + verify(value['tag'] == idx + 1, 1, + "tag value is {}, expected {}".format(value['tag'], idx + 1)) + + for idx, value in enumerate(tag_list): + rpc.delete_portal_group(value) + output = rpc.get_portal_groups() + jsonvalues = json.loads(output) + verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1, + "get_portal_group returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1)))) + if not jsonvalues: + break + + for jidx, jvalue in enumerate(jsonvalues): + verify(jvalue['portals'][0]['host'] == lo_ip[idx + jidx + 1], 1, + "host value is {}, expected {}".format(jvalue['portals'][0]['host'], lo_ip[idx + jidx + 1])) + verify(jvalue['portals'][0]['port'] == str(rpc_param['port']), 1, + "port value is {}, expected {}".format(jvalue['portals'][0]['port'], str(rpc_param['port']))) + verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1, + "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value)) + + for x in nics: + if x["ifc_index"] == 'lo': + rpc.delete_ip_address(x["ifc_index"], lo_ip[1]) + + print "verify_portal_groups_rpc_methods passed" + + +def verify_initiator_groups_rpc_methods(rpc_py, rpc_param): + rpc = spdk_rpc(rpc_py) + output = rpc.get_initiator_groups() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_initiator_groups returned {}, expected empty".format(jsonvalues)) + for idx, value in enumerate(rpc_param['netmask']): + # The initiator group tag must start at 1 + tag = idx + 1 + rpc.add_initiator_group(tag, rpc_param['initiator_name'], value) + output = 
rpc.get_initiator_groups() + jsonvalues = json.loads(output) + verify(len(jsonvalues) == tag, 1, + "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), tag)) + + tag_list = [] + for idx, value in enumerate(jsonvalues): + verify(value['initiators'][0] == rpc_param['initiator_name'], 1, + "initiator value is {}, expected {}".format(value['initiators'][0], rpc_param['initiator_name'])) + tag_list.append(value['tag']) + verify(value['tag'] == idx + 1, 1, + "tag value is {}, expected {}".format(value['tag'], idx + 1)) + verify(value['netmasks'][0] == rpc_param['netmask'][idx], 1, + "netmasks value is {}, expected {}".format(value['netmasks'][0], rpc_param['netmask'][idx])) + + for idx, value in enumerate(tag_list): + rpc.delete_initiator_group(value) + output = rpc.get_initiator_groups() + jsonvalues = json.loads(output) + verify(len(jsonvalues) == (len(tag_list) - (idx + 1)), 1, + "get_initiator_groups returned {} groups, expected {}".format(len(jsonvalues), (len(tag_list) - (idx + 1)))) + if not jsonvalues: + break + for jidx, jvalue in enumerate(jsonvalues): + verify(jvalue['initiators'][0] == rpc_param['initiator_name'], 1, + "initiator value is {}, expected {}".format(jvalue['initiators'][0], rpc_param['initiator_name'])) + verify(jvalue['tag'] != value or jvalue['tag'] == tag_list[idx + jidx + 1], 1, + "tag value is {}, expected {} and not {}".format(jvalue['tag'], tag_list[idx + jidx + 1], value)) + verify(jvalue['netmasks'][0] == rpc_param['netmask'][idx + jidx + 1], 1, + "netmasks value is {}, expected {}".format(jvalue['netmasks'][0], rpc_param['netmask'][idx + jidx + 1])) + + print "verify_initiator_groups_rpc_method passed." + + +def verify_target_nodes_rpc_methods(rpc_py, rpc_param): + rpc = spdk_rpc(rpc_py) + portal_tag = '1' + initiator_tag = '1' + output = rpc.get_target_nodes() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_target_nodes returned {}, expected empty".format(jsonvalues)) + + rpc.construct_malloc_lun(rpc_param['malloc_lun_size'], rpc_param['malloc_block_size']) + rpc.add_portal_group(portal_tag, "{}:{}".format(rpc_param['target_ip'], str(rpc_param['port']))) + rpc.add_initiator_group(initiator_tag, rpc_param['initiator_name'], rpc_param['netmask'][0]) + + lun_mapping = "Malloc" + str(rpc_param['lun_total']) + ":0" + net_mapping = portal_tag + ":" + initiator_tag + rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], + rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group']) + output = rpc.get_target_nodes() + jsonvalues = json.loads(output) + verify(len(jsonvalues) == 1, 1, + "get_target_nodes returned {} nodes, expected 1".format(len(jsonvalues))) + verify(jsonvalues[0]['lun_names'][0] == "Malloc" + str(rpc_param['lun_total']), 1, + "lun_name value is {}, expected Malloc{}".format(jsonvalues[0]['lun_names'][0], str(rpc_param['lun_total']))) + name = jsonvalues[0]['name'] + verify(name == "iqn.2016-06.io.spdk:" + rpc_param['target_name'], 1, + "target name value is {}, expected {}".format(name, "iqn.2016-06.io.spdk:" + rpc_param['target_name'])) + verify(jsonvalues[0]['alias_name'] == rpc_param['alias_name'], 1, + "target alias_name value is {}, expected {}".format(jsonvalues[0]['alias_name'], rpc_param['alias_name'])) + verify(jsonvalues[0]['lun_ids'][0] == 0, 1, + "lun id value is {}, expected 0".format(jsonvalues[0]['lun_ids'][0])) + verify(jsonvalues[0]['initiator_group_tags'][0] == 
int(initiator_tag), 1, + "initiator group tag value is {}, expected {}".format(jsonvalues[0]['initiator_group_tags'][0], initiator_tag)) + verify(jsonvalues[0]['queue_depth'] == rpc_param['queue_depth'], 1, + "queue depth value is {}, expected {}".format(jsonvalues[0]['queue_depth'], rpc_param['queue_depth'])) + verify(jsonvalues[0]['portal_group_tags'][0] == int(portal_tag), 1, + "portal group tag value is {}, expected {}".format(jsonvalues[0]['portal_group_tags'][0], portal_tag)) + verify(jsonvalues[0]['chap_disabled'] == rpc_param['chap_disable'], 1, + "chap disable value is {}, expected {}".format(jsonvalues[0]['chap_disabled'], rpc_param['chap_disable'])) + verify(jsonvalues[0]['chap_mutual'] == rpc_param['chap_mutal'], 1, + "chap mutual value is {}, expected {}".format(jsonvalues[0]['chap_mutual'], rpc_param['chap_mutal'])) + verify(jsonvalues[0]['chap_required'] == rpc_param['chap_required'], 1, + "chap required value is {}, expected {}".format(jsonvalues[0]['chap_required'], rpc_param['chap_required'])) + verify(jsonvalues[0]['chap_auth_group'] == rpc_param['chap_auth_group'], 1, + "chap auth group value is {}, expected {}".format(jsonvalues[0]['chap_auth_group'], rpc_param['chap_auth_group'])) + output = rpc.get_luns() + jsonvalue = json.loads(output) + verify(jsonvalue[0]['claimed'] is True, 1, + "The claimed value is {}, expected true".format(jsonvalue[0]['claimed'])) + + rpc.delete_target_node(name) + output = rpc.get_target_nodes() + jsonvalues = json.loads(output) + verify(not jsonvalues, 1, + "get_target_nodes returned {}, expected empty".format(jsonvalues)) + + rpc.construct_target_node(rpc_param['target_name'], rpc_param['alias_name'], lun_mapping, net_mapping, rpc_param['queue_depth'], + rpc_param['chap_disable'], rpc_param['chap_mutal'], rpc_param['chap_required'], rpc_param['chap_auth_group']) + + rpc.delete_portal_group(portal_tag) + rpc.delete_initiator_group(initiator_tag) + rpc.delete_target_node(name) + output = rpc.get_target_nodes() + jsonvalues = json.loads(output) + if not jsonvalues: + print "This issue will be fixed later." + + print "verify_target_nodes_rpc_methods passed." + +def verify_get_interfaces(rpc_py): + rpc = spdk_rpc(rpc_py) + nics = json.loads(rpc.get_interfaces()) + nics_names = set(x["name"].encode('ascii', 'ignore') for x in nics) + # parse ip link show to verify the get_interfaces result + ifcfg_nics = set(re.findall("\S+:\s(\S+):\s<.*", check_output(["ip", "link", "show"]))) + verify(nics_names == ifcfg_nics, 1, "get_interfaces returned {}".format(nics)) + print "verify_get_interfaces passed." 
+ +def help_get_interface_ip_list(rpc_py, nic_name): + rpc = spdk_rpc(rpc_py) + nics = json.loads(rpc.get_interfaces()) + nic = filter(lambda x: x["name"] == nic_name, nics) + verify(len(nic) != 0, 1, + "Nic name: {} is not found in {}".format(nic_name, [x["name"] for x in nics])) + return nic[0]["ip_addr"] + +def verify_add_delete_ip_address(rpc_py): + rpc = spdk_rpc(rpc_py) + nics = json.loads(rpc.get_interfaces()) + # add ip on all nic + for x in nics: + faked_ip = "123.123.{}.{}".format(random.randint(1, 254), random.randint(1, 254)) + rpc.add_ip_address(x["ifc_index"], faked_ip) + verify(faked_ip in help_get_interface_ip_list(rpc_py, x["name"]), 1, + "add ip {} to nic {} failed.".format(faked_ip, x["name"])) + try: + check_call(["ping", "-c", "1", "-W", "1", faked_ip]) + except: + verify(False, 1, + "ping ip {} for {} was failed(adding was successful)".format + (faked_ip, x["name"])) + rpc.delete_ip_address(x["ifc_index"], faked_ip) + verify(faked_ip not in help_get_interface_ip_list(rpc_py, x["name"]), 1, + "delete ip {} from nic {} failed.(adding and ping were successful)".format + (faked_ip, x["name"])) + # ping should be failed and throw an CalledProcessError exception + try: + check_call(["ping", "-c", "1", "-W", "1", faked_ip]) + except CalledProcessError as _: + pass + except Exception as e: + verify(False, 1, + "Unexpected exception was caught {}(adding/ping/delete were successful)".format + (str(e))) + else: + verify(False, 1, + "ip {} for {} could be pinged after delete ip(adding/ping/delete were successful)".format + (faked_ip, x["name"])) + print "verify_add_delete_ip_address passed." + +if __name__ == "__main__": + + rpc_py = sys.argv[1] + + try: + verify_trace_flag_rpc_methods(rpc_py, rpc_param) + verify_get_interfaces(rpc_py) + verify_add_delete_ip_address(rpc_py) + verify_luns_rpc_methods(rpc_py, rpc_param) + verify_portal_groups_rpc_methods(rpc_py, rpc_param) + verify_initiator_groups_rpc_methods(rpc_py, rpc_param) + verify_target_nodes_rpc_methods(rpc_py, rpc_param) + verify_scsi_devices_rpc_methods(rpc_py) + verify_iscsi_connection_rpc_methods(rpc_py) + except RpcException as e: + print "{}. Exiting with status {}".format(e.message, e.retval) + raise e + except Exception as e: + raise e + + sys.exit(0) diff --git a/test/iscsi_tgt/rpc_config/rpc_config.sh b/test/iscsi_tgt/rpc_config/rpc_config.sh new file mode 100755 index 000000000..361af8735 --- /dev/null +++ b/test/iscsi_tgt/rpc_config/rpc_config.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$testdir/../../.. +source $rootdir/scripts/autotest_common.sh + +if [ -z "$TARGET_IP" ]; then + echo "TARGET_IP not defined in environment" + exit 1 +fi + +if [ -z "$INITIATOR_IP" ]; then + echo "INITIATOR_IP not defined in environment" + exit 1 +fi + +timing_enter rpc_config + +# iSCSI target configuration +PORT=3260 +RPC_PORT=5260 +INITIATOR_TAG=2 +INITIATOR_NAME=ALL +NETMASK=$INITIATOR_IP/32 +MALLOC_LUN_SIZE=64 + + +rpc_py=$rootdir/scripts/rpc.py +rpc_config_py="python $testdir/rpc_config.py" + + +./app/iscsi_tgt/iscsi_tgt -c $testdir/iscsi.conf & +pid=$! +echo "Process pid: $pid" + +trap "process_core; killprocess $pid; exit 1" SIGINT SIGTERM EXIT + +waitforlisten $pid ${RPC_PORT} +echo "iscsi_tgt is listening. Running tests..." + +$rpc_config_py $rpc_py + +trap - SIGINT SIGTERM EXIT + +iscsicleanup +killprocess $pid +timing_exit rpc_config
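
For a quick manual check of the new target, the sequence below mirrors what the
fio/reset test scripts above do. It is only a sketch: it assumes iscsi_tgt is
started from the repository root with one of the bundled iscsi.conf files (so the
JSON-RPC server listens on port 5260) and that TARGET_IP and INITIATOR_IP are set
as in the tests.

# start the target (the config file must have the [Rpc] section enabled)
./app/iscsi_tgt/iscsi_tgt -c test/iscsi_tgt/reset/iscsi.conf &

# back LUN0 with a 64 MB malloc blockdev using a 512-byte block size
python scripts/rpc.py construct_malloc_lun 64 512

# portal group 1 on the target portal, initiator group 2 open to the initiator
python scripts/rpc.py add_portal_group 1 $TARGET_IP:3260
python scripts/rpc.py add_initiator_group 2 ALL $INITIATOR_IP/32

# Malloc0 as LUN0, portal group 1 mapped to initiator group 2,
# queue depth 64, CHAP disabled
python scripts/rpc.py construct_target_node Target3 Target3_alias 'Malloc0:0' '1:2' 64 1 0 0 0

# discover and log in from the initiator side, then drive I/O with scripts/fio.py
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:3260
iscsiadm -m node --login -p $TARGET_IP:3260
python scripts/fio.py 4096 1 randrw 5 verify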