test/nvmf: run host/perf.sh separately for each transport

While here, remove NVMF_TCP_IP_ADDRESS since we already
have NVMF_FIRST_TARGET_IP which serves the same purpose
when testing the TCP transport.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I7cc4712cd9746377937e889127aa5a61566d8846

Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/456705
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Authored by Jim Harris on 2019-06-03 15:22:51 -07:00; committed by Ben Walker
parent aa429c8044
commit c4d5d2fdcf
3 changed files with 65 additions and 80 deletions
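For context on the commit message above: the scripts in this change rely on nvmftestinit having already chosen one target IP for whichever transport is under test, which is why a separate NVMF_TCP_IP_ADDRESS is no longer needed. The following is a minimal sketch only, not the actual common.sh logic; RDMA_TARGET_IP is a hypothetical variable standing in for whatever NIC detection the real helpers perform.

# sketch: one NVMF_FIRST_TARGET_IP covering both transports
case "$TEST_TRANSPORT" in
	rdma)
		NVMF_FIRST_TARGET_IP=${RDMA_TARGET_IP:?no RDMA NIC address configured}
		;;
	tcp)
		# a local TCP target is reachable on loopback
		NVMF_FIRST_TARGET_IP=127.0.0.1
		;;
	*)
		echo "unknown transport: $TEST_TRANSPORT" >&2
		exit 1
		;;
esac
export NVMF_FIRST_TARGET_IP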


@@ -186,7 +186,12 @@ function nvmfappstart()
 	nvmfpid=$!
 	trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT
 	waitforlisten $nvmfpid
-	modprobe nvme-$TEST_TRANSPORT
+	# currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
+	# support; that's fine since the host/perf test uses the SPDK initiator
+	# maybe later we will enforce modprobe to succeed once we have systems in the test pool
+	# with nvme-tcp kernel support - but until then let this pass so we can still run the
+	# host/perf test with the tcp transport
+	modprobe nvme-$TEST_TRANSPORT || true
 	timing_exit start_nvmf_tgt
 }
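The `|| true` above deliberately swallows a modprobe failure for every transport, as the new comments explain. If the test pool later gains nvme-tcp kernel support, a stricter variant could fail hard only when the kernel module is genuinely required. This is a sketch under that assumption, not part of this change; the function name is hypothetical.

check_kernel_transport() {
	if modprobe nvme-$TEST_TRANSPORT; then
		return 0
	fi
	if [ "$TEST_TRANSPORT" = "tcp" ]; then
		# host/perf only needs the SPDK initiator for tcp, so keep going
		echo "nvme-tcp kernel module not available; continuing with the SPDK initiator only"
		return 0
	fi
	echo "kernel module nvme-$TEST_TRANSPORT is required for this transport" >&2
	return 1
}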


@@ -12,26 +12,12 @@ rpc_py="$rootdir/scripts/rpc.py"
 set -e
-nvmftestinit
-TYPES="TCP"
-if [ -z $NVMF_FIRST_TARGET_IP ]; then
-	echo "no RDMA NIC for nvmf test, will only test TCP/IP transport"
-else
-	TYPES=${TYPES}" RDMA"
-fi
 timing_enter perf
-timing_enter start_nvmf_tgt
-$NVMF_APP -m 0xF &
-nvmfpid=$!
+nvmftestinit
+nvmfappstart "-m 0xF"
-trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT
-waitforlisten $nvmfpid
 $rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
-timing_exit start_nvmf_tgt
 local_nvme_trid="trtype:PCIe traddr:"$($rpc_py get_subsystem_config bdev | jq -r '.[].params | select(.name=="Nvme0").traddr')
 bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
@@ -40,70 +26,55 @@ if [ -n "$local_nvme_trid" ]; then
 	bdevs="$bdevs Nvme0n1"
 fi
-function test_perf()
-{
-	TYPE=$1
-	NVMF_TARGET_IP=$2
-	$rpc_py nvmf_create_transport -t $TYPE
-	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
-	for bdev in $bdevs; do
-		$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
-	done
-	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TYPE -a $NVMF_TARGET_IP -s $NVMF_PORT
-	# Test multi-process access to local NVMe device
-	if [ -n "$local_nvme_trid" ]; then
-		$rootdir/examples/nvme/perf/perf -i $NVMF_APP_SHM_ID -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
-	fi
-	$rootdir/examples/nvme/perf/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TYPE adrfam:IPv4 traddr:$NVMF_TARGET_IP trsvcid:$NVMF_PORT"
-	sync
-	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
-	if [ $RUN_NIGHTLY -eq 1 ]; then
-		# Configure nvme devices with nvmf lvol_bdev backend
-		if [ -n "$local_nvme_trid" ]; then
-			ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
-			get_lvs_free_mb $ls_guid
-			lb_guid=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
-			# Create lvol bdev for nested lvol stores
-			ls_nested_guid=$($rpc_py construct_lvol_store $lb_guid lvs_n_0)
-			get_lvs_free_mb $ls_nested_guid
-			lb_nested_guid=$($rpc_py construct_lvol_bdev -u $ls_nested_guid lbd_nest_0 $free_mb)
-			$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
-			for bdev in $lb_nested_guid; do
-				$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
-			done
-			$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TYPE -a $NVMF_TARGET_IP -s $NVMF_PORT
-			# Test perf as host with different io_size and qd_depth in nightly
-			qd_depth=("1" "128")
-			io_size=("512" "131072")
-			for qd in ${qd_depth[@]}; do
-				for o in ${io_size[@]}; do
-					$rootdir/examples/nvme/perf/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TYPE adrfam:IPv4 traddr:$NVMF_TARGET_IP trsvcid:$NVMF_PORT"
-				done
-			done
-			# Delete subsystems, lvol_bdev and destroy lvol_store.
-			$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
-			$rpc_py destroy_lvol_bdev "$lb_nested_guid"
-			$rpc_py destroy_lvol_store -l lvs_n_0
-			$rpc_py destroy_lvol_bdev "$lb_guid"
-			$rpc_py destroy_lvol_store -l lvs_0
-		fi
-	fi
-}
-for type in $TYPES; do
-	if [ $type == "TCP" ]; then
-		nvmf_tgt_ip=$NVMF_TCP_IP_ADDRESS
-	else
-		nvmf_tgt_ip=$NVMF_FIRST_TARGET_IP
-	fi
-	test_perf $type $nvmf_tgt_ip
-done
+$rpc_py nvmf_create_transport -t $TEST_TRANSPORT
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for bdev in $bdevs; do
+	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+# Test multi-process access to local NVMe device
+if [ -n "$local_nvme_trid" ]; then
+	$rootdir/examples/nvme/perf/perf -i $NVMF_APP_SHM_ID -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
+fi
+$rootdir/examples/nvme/perf/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+sync
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+if [ $RUN_NIGHTLY -eq 1 ]; then
+	# Configure nvme devices with nvmf lvol_bdev backend
+	if [ -n "$local_nvme_trid" ]; then
+		ls_guid=$($rpc_py construct_lvol_store Nvme0n1 lvs_0)
+		get_lvs_free_mb $ls_guid
+		lb_guid=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
+		# Create lvol bdev for nested lvol stores
+		ls_nested_guid=$($rpc_py construct_lvol_store $lb_guid lvs_n_0)
+		get_lvs_free_mb $ls_nested_guid
+		lb_nested_guid=$($rpc_py construct_lvol_bdev -u $ls_nested_guid lbd_nest_0 $free_mb)
+		$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+		for bdev in $lb_nested_guid; do
+			$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+		done
+		$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+		# Test perf as host with different io_size and qd_depth in nightly
+		qd_depth=("1" "128")
+		io_size=("512" "131072")
+		for qd in ${qd_depth[@]}; do
+			for o in ${io_size[@]}; do
+				$rootdir/examples/nvme/perf/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
+			done
+		done
+		# Delete subsystems, lvol_bdev and destroy lvol_store.
+		$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+		$rpc_py destroy_lvol_bdev "$lb_nested_guid"
+		$rpc_py destroy_lvol_store -l lvs_n_0
+		$rpc_py destroy_lvol_bdev "$lb_guid"
+		$rpc_py destroy_lvol_store -l lvs_0
+	fi
+fi
 trap - SIGINT SIGTERM EXIT
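Everything added above keys off $TEST_TRANSPORT and $NVMF_FIRST_TARGET_IP rather than per-transport script logic. As a rough sketch of the kind of argument handling that turns the --transport flag (used in the next hunk) into $TEST_TRANSPORT; the real parsing lives in the shared nvmf test helpers, and the default and parsing style here are assumptions.

# sketch only: map --transport=<name> onto TEST_TRANSPORT
TEST_TRANSPORT=rdma
for arg in "$@"; do
	case "$arg" in
		--transport=*)
			TEST_TRANSPORT="${arg#--transport=}"
			;;
	esac
done
export TEST_TRANSPORT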


@@ -43,6 +43,15 @@ timing_enter host
 run_test suite test/nvmf/host/bdevperf.sh $TEST_ARGS
 run_test suite test/nvmf/host/identify.sh $TEST_ARGS
 run_test suite test/nvmf/host/perf.sh $TEST_ARGS
+# This script has traditionally tested the tcp transport, and then
+# also the rdma transport if it's available. Now that this script
+# is parameterized, explicitly run the test a second time for the
+# tcp transport, at least until the test pool is set up with a VM
+# that can run all of the tcp tests. At that point, this whole
+# script will be run twice, once for rdma and once for tcp, and
+# then this second invocation can be removed.
+run_test suite test/nvmf/host/perf.sh $TEST_ARGS --transport=tcp
 # TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
 #run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
 run_test suite test/nvmf/host/aer.sh $TEST_ARGS
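The new comment block above anticipates eventually dropping the extra perf.sh invocation and instead running this whole script once per transport from its caller. That future shape might look like the following sketch; the wrapper loop and the nvmf.sh path are assumptions, not part of this change.

# hypothetical top-level caller, once per transport
for transport in rdma tcp; do
	run_test suite test/nvmf/nvmf.sh --transport=$transport
done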