net/vpp: include VPP into iSCSI test scripts

Tests that were thus far performed using the posix net framework
can now be run with VPP. This patch adds the network interface
configuration needed for VPP to work in the iSCSI tests.
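
With SPDK_TEST_VPP=1 the functional run now drives the iSCSI suite over
VPP. As a minimal usage sketch (assuming VPP is installed and
SPDK_TEST_VPP=1 is exported by the test configuration):

  ./test/iscsi_tgt/iscsi_tgt.sh vpp     # target sockets go through VPP
  ./test/iscsi_tgt/iscsi_tgt.sh posix   # unchanged posix run, for comparison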

Some tests are disabled on purpose:
- IP Migration, RBD and NVMe-oF, because those tests lack network
  namespace support
- rpc_config adding/deleting an IP address, as VPP has a separate
  utility (vppctl) for that; see the sketch below
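
For reference, a minimal sketch of the vppctl equivalent of the skipped
add/delete IP RPCs; the interface name (host-spdk_tgt_int) and address
below are placeholders only:

  vppctl set interface ip address host-spdk_tgt_int 10.0.2.15/24
  vppctl set interface ip address del host-spdk_tgt_int 10.0.2.15/24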

calsoft.sh doesn't handle the TCP stream properly and fails to decode
iSCSI requests when they are split across TCP segments. This is a very
common situation with VPP and causes calsoft.sh to never pass.

Change-Id: I7c80427ca1675a1789ce7440796cc8d9956f1c9e
Signed-off-by: Slawomir Mrozowicz <slawomirx.mrozowicz@intel.com>
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/394174
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Commit ef489303b4 (parent 29408ebaa9), authored by Tomasz Zawadzki on
2018-06-25 08:44:59 -04:00 and committed by Ben Walker.
6 changed files with 121 additions and 12 deletions


@@ -199,6 +199,10 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
run_test suite test/spdkcli/raid.sh
fi
if [ $SPDK_TEST_VPP -eq 1 ]; then
run_test suite ./test/iscsi_tgt/iscsi_tgt.sh vpp
fi
if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
run_test suite ./test/blobfs/rocksdb/rocksdb.sh
run_test suite ./test/blobstore/blobstore.sh


@@ -13,6 +13,9 @@ INITIATOR_TAG=2
INITIATOR_NAME=ANY
PORTAL_TAG=1
ISCSI_APP="$TARGET_NS_CMD ./app/iscsi_tgt/iscsi_tgt"
if [ $SPDK_TEST_VPP -eq 1 ]; then
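# -L enables an SPDK debug log flag; sock_vpp makes the VPP socket
# implementation log its activity (effective in debug builds)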
ISCSI_APP+=" -L sock_vpp"
fi
ISCSI_TEST_CORE_MASK=0xFF
function create_veth_interfaces() {
@@ -34,17 +37,25 @@
# Accept connections from veth interface
iptables -I INPUT 1 -i $INITIATOR_INTERFACE -p tcp --dport $ISCSI_PORT -j ACCEPT
$TARGET_NS_CMD ip link set lo up
$TARGET_NS_CMD ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE
$TARGET_NS_CMD ip link set $TARGET_INTERFACE up
# Verify connectivity
ping -c 1 $TARGET_IP
ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
if [ "$1" == "posix" ]; then
$TARGET_NS_CMD ip link set lo up
$TARGET_NS_CMD ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE
# Verify connectivity
ping -c 1 $TARGET_IP
ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
else
start_vpp
fi
}
function cleanup_veth_interfaces() {
# $1 = test type (posix/vpp)
if [ "$1" == "vpp" ]; then
kill_vpp
fi
# Cleanup veth interfaces and network namespace
# Note: removing one veth, removes the pair
@@ -90,3 +101,72 @@ function iscsitestfini() {
$rootdir/scripts/setup.sh reset
fi
}
function start_vpp() {
# Make sure that the posix side doesn't send jumbo packets; on the VPP
# side the maximum MTU for TCP is 1460 and the tests don't work reliably
# with larger packets.
MTU=1460
ip link set dev $INITIATOR_INTERFACE mtu $MTU
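# Turning off TCP segmentation offload below makes the kernel itself
# honor the 1460-byte limit; the lowercase "ethtool -k" call only dumps
# the current offload settings into the test log.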
ethtool -K $INITIATOR_INTERFACE tso off
ethtool -k $INITIATOR_INTERFACE
# Start VPP process in SPDK target network namespace
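# Rough meaning of the startup options used below: "nodaemon" keeps vpp
# in the foreground so $! captures its pid, "cli-listen" is the socket
# vppctl connects to, "no-pci" keeps DPDK away from physical NICs,
# "num-mbufs" sizes the packet buffer pool, "evt_qs_memfd_seg" places
# session event queues in a memfd segment so an external app (SPDK) can
# map them, "socket-name" is the VPP API socket, and the plugins block
# disables everything except dpdk_plugin.so.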
$TARGET_NS_CMD vpp \
unix { nodaemon cli-listen /run/vpp/cli.sock } \
dpdk { no-pci num-mbufs 128000 } \
session { evt_qs_memfd_seg } \
socksvr { socket-name /run/vpp-api.sock } \
plugins { \
plugin default { disable } \
plugin dpdk_plugin.so { enable } \
} &
vpp_pid=$!
echo "VPP Process pid: $vpp_pid"
# Wait until VPP starts responding
xtrace_disable
counter=40
while [ $counter -gt 0 ] ; do
vppctl show version &> /dev/null && break
counter=$(( $counter - 1 ))
sleep 0.5
done
xtrace_restore
if [ $counter -eq 0 ] ; then
return 1
fi
# Setup host interface
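# "create host-interface" attaches VPP to the veth inside the namespace
# through an af_packet interface, which VPP exposes as host-<ifname>.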
vppctl create host-interface name $TARGET_INTERFACE
VPP_TGT_INT="host-$TARGET_INTERFACE"
vppctl set interface state $VPP_TGT_INT up
vppctl set interface ip address $VPP_TGT_INT $TARGET_IP/24
vppctl set interface mtu $MTU $VPP_TGT_INT
vppctl show interface
# Disable the session layer.
# NOTE: the VPP net framework is expected to enable it itself.
vppctl session disable
# Verify connectivity
vppctl show int addr
ip addr show $INITIATOR_INTERFACE
ip netns exec $TARGET_NAMESPACE ip addr show $TARGET_INTERFACE
sleep 3
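# Payload size math: 28 = 20-byte IP header + 8-byte ICMP header, and
# "-M do" forbids fragmentation, so a successful ping proves the whole
# MTU fits on the wire. The vppctl ping subtracts a further 8 bytes,
# presumably so the frame stays under the MTU once vppctl adds its own
# ICMP header.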
ping -c 1 $TARGET_IP -s $(( $MTU - 28 )) -M do
vppctl ping $INITIATOR_IP repeat 1 size $(( $MTU - (28 + 8) )) verbose
}
function kill_vpp() {
vppctl delete host-interface name $TARGET_INTERFACE
# Dump VPP configuration before kill
vppctl show api clients
vppctl show session
vppctl show errors
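# killprocess is the shared test helper that signals the pid and waits
# for it to exit.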
killprocess $vpp_pid
}


@@ -53,6 +53,7 @@ $rpc_py bdev_inject_error EE_Malloc0 'all' 'failure' -n 1000
dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')
set +e
waitforfile /dev/$dev
mkfs.ext4 -F /dev/$dev
if [ $? -eq 0 ]; then
echo "mkfs successful - expected failure"


@@ -24,15 +24,25 @@ create_veth_interfaces $TEST_TYPE
trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
run_test suite ./test/iscsi_tgt/sock/sock.sh
run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
run_test suite ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
if [ "$TEST_TYPE" == "posix" ]; then
# calsoft doesn't handle the TCP stream properly and fails to decode
# iSCSI requests when they are split across TCP segments. This is a very
# common situation with VPP and causes calsoft.sh to never pass.
run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
fi
run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
run_test suite ./test/iscsi_tgt/reset/reset.sh
run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
run_test suite ./test/iscsi_tgt/fio/fio.sh
run_test suite ./test/iscsi_tgt/qos/qos.sh
run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
# IP Migration tests do not support network namespaces;
# they can only be run on posix sockets.
if [ "$TEST_TYPE" == "posix" ]; then
run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
fi
run_test suite ./test/iscsi_tgt/trace_record/trace_record.sh
if [ $RUN_NIGHTLY -eq 1 ]; then
@@ -43,14 +53,22 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
run_test suite ./test/iscsi_tgt/digests/digests.sh
fi
if [ $SPDK_TEST_RBD -eq 1 ]; then
run_test suite ./test/iscsi_tgt/rbd/rbd.sh
# RBD tests do not support network namespaces;
# they can only be run on posix sockets.
if [ "$TEST_TYPE" == "posix" ]; then
run_test suite ./test/iscsi_tgt/rbd/rbd.sh
fi
fi
trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
if [ $SPDK_TEST_NVMF -eq 1 ]; then
# Test configure remote NVMe device from rpc and conf file
run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
# NVMe-oF tests do not support network namespaces;
# they can only be run on posix sockets.
if [ "$TEST_TYPE" == "posix" ]; then
# Test configure remote NVMe device from rpc and conf file
run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
fi
fi
if [ $RUN_NIGHTLY -eq 1 ]; then


@@ -486,7 +486,10 @@ if __name__ == "__main__":
try:
verify_log_flag_rpc_methods(rpc_py, rpc_param)
verify_get_interfaces(rpc_py)
verify_add_delete_ip_address(rpc_py)
# Add/delete IP will not be supported with VPP;
# it has a separate vppctl utility for that.
if test_type == 'posix':
verify_add_delete_ip_address(rpc_py)
create_malloc_bdevs_rpc_methods(rpc_py, rpc_param)
verify_portal_groups_rpc_methods(rpc_py, rpc_param)
verify_initiator_groups_rpc_methods(rpc_py, rpc_param)


@@ -67,6 +67,9 @@ function waitfortcp() {
iscsitestinit $1 $2
HELLO_SOCK_APP="$TARGET_NS_CMD $rootdir/examples/sock/hello_world/hello_sock"
if [ $SPDK_TEST_VPP -eq 1 ]; then
HELLO_SOCK_APP+=" -L sock_vpp"
fi
SOCAT_APP="socat"
# ----------------