From a02b1481db822178602951934261e3d3a96c9c75 Mon Sep 17 00:00:00 2001 From: Jim Harris Date: Mon, 13 May 2019 12:11:00 -0700 Subject: [PATCH] test/nvmf: reduce duplication between target scripts All of the checking for NVMF_FIRST_TARGET_IP, starting the target, waiting on the target and modprobing nvme-rdma is duplicated in every script. So move a lot of this either to nvmftestinit() or a new nvmfappstart() function in common.sh. Also just kill the nvmf target in nvmftestfini, rather than the script to explicitly kill it. Signed-off-by: Jim Harris Change-Id: I5864404610a4244473f460d48264de92687ed867 Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454678 Reviewed-by: Shuhei Matsumoto Reviewed-by: Seth Howell Reviewed-by: Changpeng Liu Tested-by: SPDK CI Jenkins --- test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh | 20 ++++-------- test/nvmf/common.sh | 18 +++++++++++ test/nvmf/host/aer.sh | 3 +- test/nvmf/host/bdevperf.sh | 3 +- test/nvmf/host/fio.sh | 3 +- test/nvmf/host/identify.sh | 3 +- test/nvmf/host/perf.sh | 3 +- test/nvmf/target/bdev_io_wait.sh | 22 ++----------- test/nvmf/target/bdevio.sh | 23 ++------------ test/nvmf/target/connect_disconnect.sh | 25 +++------------ test/nvmf/target/create_transport.sh | 26 +++------------- test/nvmf/target/discovery.sh | 26 +++------------- test/nvmf/target/filesystem.sh | 20 ++---------- test/nvmf/target/fio.sh | 25 ++------------- test/nvmf/target/multiconnection.sh | 22 ++----------- test/nvmf/target/nmic.sh | 25 +++------------ test/nvmf/target/nvme_cli.sh | 21 ++----------- test/nvmf/target/nvmf_lvol.sh | 22 ++----------- test/nvmf/target/rpc.sh | 24 ++------------ test/nvmf/target/shutdown.sh | 31 +++++-------------- test/nvmf/target/srq_overwhelm.sh | 20 ++---------- 21 files changed, 73 insertions(+), 312 deletions(-) diff --git a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh index 3d8b82c66..406e51d33 100755 --- a/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh +++ b/test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh @@ -8,18 +8,11 @@ source $rootdir/test/common/autotest_common.sh source $rootdir/test/nvmf/common.sh source $rootdir/test/iscsi_tgt/common.sh +nvmftestinit # $1 = "iso" - triggers isolation mode (setting up required environment). # $2 = test type posix or vpp. defaults to posix. -nvmftestinit $1 iscsitestinit $1 $2 -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - rpc_py="$rootdir/scripts/rpc.py" fio_py="$rootdir/scripts/fio.py" @@ -38,7 +31,7 @@ function run_nvme_remote() { $ISCSI_APP -r "$iscsi_rpc_addr" -m 0x1 -p 0 -s 512 --wait-for-rpc & iscsipid=$! echo "iSCSI target launched. pid: $iscsipid" - trap "killprocess $iscsipid; killprocess $nvmfpid; iscsitestfini $1 $2; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT + trap "killprocess $iscsipid; iscsitestfini $1 $2; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $iscsipid "$iscsi_rpc_addr" $rpc_py -s "$iscsi_rpc_addr" set_iscsi_options -o 30 -a 16 $rpc_py -s "$iscsi_rpc_addr" start_subsystem_init @@ -71,7 +64,7 @@ NVMF_APP="$rootdir/app/nvmf_tgt/nvmf_tgt" $NVMF_APP -m 0x2 -p 1 -s 512 --wait-for-rpc & nvmfpid=$! echo "NVMf target launched. 
pid: $nvmfpid" -trap "killprocess $nvmfpid; iscsitestfini $1 $2; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT +trap "iscsitestfini $1 $2; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rpc_py start_subsystem_init $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 @@ -88,8 +81,8 @@ timing_enter start_iscsi_tgt run_nvme_remote "local" -trap "iscsicleanup; killprocess $iscsipid; killprocess $nvmfpid; \ - rm -f ./local-job0-0-verify.state; iscsitestfini $1 $2; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT +trap "iscsicleanup; killprocess $iscsipid; \ + rm -f ./local-job0-0-verify.state; iscsitestfini $1 $2; nvmftestfini; exit 1" SIGINT SIGTERM EXIT sleep 1 echo "Running FIO" @@ -110,9 +103,8 @@ trap - SIGINT SIGTERM EXIT iscsicleanup killprocess $iscsipid $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 -killprocess $nvmfpid report_test_completion "iscsi_nvme_remote" iscsitestfini $1 $2 -nvmftestfini $1 +nvmftestfini timing_exit nvme_remote diff --git a/test/nvmf/common.sh b/test/nvmf/common.sh index df34cce59..9df1ba309 100755 --- a/test/nvmf/common.sh +++ b/test/nvmf/common.sh @@ -163,10 +163,28 @@ function nvmftestinit() $rootdir/scripts/setup.sh rdma_device_init fi + RDMA_IP_LIST=$(get_available_rdma_ips) + NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) + if [ -z $NVMF_FIRST_TARGET_IP ]; then + echo "no NIC for nvmf test" + exit 0 + fi +} + +function nvmfappstart() +{ + timing_enter start_nvmf_tgt + $NVMF_APP $1 & + nvmfpid=$! + trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT + waitforlisten $nvmfpid + modprobe nvme-rdma + timing_exit start_nvmf_tgt } function nvmftestfini() { + killprocess $nvmfpid if [ "$NVMF_TEST_MODE" == "iso" ]; then $rootdir/scripts/setup.sh reset rdma_device_init diff --git a/test/nvmf/host/aer.sh b/test/nvmf/host/aer.sh index bee3d73b0..96242187b 100755 --- a/test/nvmf/host/aer.sh +++ b/test/nvmf/host/aer.sh @@ -26,7 +26,7 @@ timing_enter start_nvmf_tgt $NVMF_APP -m 0xF & nvmfpid=$! -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 @@ -82,6 +82,5 @@ trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit aer diff --git a/test/nvmf/host/bdevperf.sh b/test/nvmf/host/bdevperf.sh index d02c47339..e7bdaa3f3 100755 --- a/test/nvmf/host/bdevperf.sh +++ b/test/nvmf/host/bdevperf.sh @@ -29,7 +29,7 @@ timing_enter start_nvmf_tgt $NVMF_APP -m 0xF & nvmfpid=$! -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 @@ -49,6 +49,5 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 trap - SIGINT SIGTERM EXIT -killprocess $nvmfpid nvmftestfini timing_exit bdevperf diff --git a/test/nvmf/host/fio.sh b/test/nvmf/host/fio.sh index c43c48e51..287038ad5 100755 --- a/test/nvmf/host/fio.sh +++ b/test/nvmf/host/fio.sh @@ -32,7 +32,7 @@ timing_enter start_nvmf_tgt $NVMF_APP -m 0xF & nvmfpid=$! 
-trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 @@ -91,6 +91,5 @@ fi trap - SIGINT SIGTERM EXIT rm -f ./local-test-0-verify.state -killprocess $nvmfpid nvmftestfini timing_exit fio diff --git a/test/nvmf/host/identify.sh b/test/nvmf/host/identify.sh index 8d176e305..08fb8c81d 100755 --- a/test/nvmf/host/identify.sh +++ b/test/nvmf/host/identify.sh @@ -27,7 +27,7 @@ timing_enter start_nvmf_tgt $NVMF_APP -m 0xF & nvmfpid=$! -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 @@ -61,6 +61,5 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 trap - SIGINT SIGTERM EXIT -killprocess $nvmfpid nvmftestfini timing_exit identify diff --git a/test/nvmf/host/perf.sh b/test/nvmf/host/perf.sh index 08d5bcf20..5940a7d5b 100755 --- a/test/nvmf/host/perf.sh +++ b/test/nvmf/host/perf.sh @@ -31,7 +31,7 @@ timing_enter start_nvmf_tgt $NVMF_APP -m 0xF & nvmfpid=$! -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforlisten $nvmfpid $rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config @@ -111,6 +111,5 @@ done trap - SIGINT SIGTERM EXIT -killprocess $nvmfpid nvmftestfini timing_exit perf diff --git a/test/nvmf/target/bdev_io_wait.sh b/test/nvmf/target/bdev_io_wait.sh index c1bdef593..0a0a801ee 100755 --- a/test/nvmf/target/bdev_io_wait.sh +++ b/test/nvmf/target/bdev_io_wait.sh @@ -12,33 +12,16 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter bdev_io_wait # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./bdev_io_wait.sh iso nvmftestinit +nvmfappstart "-m 0xF --wait-for-rpc" -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter bdev_io_wait -timing_enter start_nvmf_tgt - -$NVMF_APP -m 0xF --wait-for-rpc & -nvmfpid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid # Minimal number of bdev io pool (5) and cache (1) $rpc_py set_bdev_options -p 5 -c 1 $rpc_py start_subsystem_init $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt - -modprobe -v nvme-rdma $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 @@ -68,6 +51,5 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit bdev_io_wait diff --git a/test/nvmf/target/bdevio.sh b/test/nvmf/target/bdevio.sh index ecae8c7f0..492769637 100755 --- a/test/nvmf/target/bdevio.sh +++ b/test/nvmf/target/bdevio.sh @@ -12,29 +12,11 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter bdevio # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. 
sudo ./bdev_io_wait.sh iso nvmftestinit - -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter bdevio -timing_enter start_nvmf_tgt - -$NVMF_APP -m 0xF & -nvmfpid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid -timing_exit start_nvmf_tgt - -modprobe -v nvme-rdma +nvmfappstart "-m 0xF" $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 @@ -53,6 +35,5 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit bdev_io_wait diff --git a/test/nvmf/target/connect_disconnect.sh b/test/nvmf/target/connect_disconnect.sh index ea9ed33d7..aadcddbc9 100755 --- a/test/nvmf/target/connect_disconnect.sh +++ b/test/nvmf/target/connect_disconnect.sh @@ -12,31 +12,16 @@ rpc_py="$rootdir/scripts/rpc.py" set -e -# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. -# e.g. sudo ./filesystem.sh iso -nvmftestinit - -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - # connect disconnect is geared towards ensuring that we are properly freeing resources after disconnecting qpairs. timing_enter connect_disconnect -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -nvmfpid=$! +# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. +# e.g. sudo ./filesystem.sh iso +nvmftestinit +nvmfappstart "-m 0xF" -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -c 0 -modprobe -v nvme-rdma - bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 @@ -61,7 +46,5 @@ set -x trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid - nvmftestfini timing_exit connect_disconnect diff --git a/test/nvmf/target/create_transport.sh b/test/nvmf/target/create_transport.sh index 3d6843cc0..bebbaa1d7 100755 --- a/test/nvmf/target/create_transport.sh +++ b/test/nvmf/target/create_transport.sh @@ -12,40 +12,23 @@ rpc_py="$rootdir/scripts/rpc.py" set -e -# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. -# e.g. sudo ./crt_trprt.sh iso -nvmftestinit - if ! hash nvme; then echo "nvme command not found; skipping create transport test" exit 0 fi -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - timing_enter cr_trprt -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -nvmfpid=$! +# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. +# e.g. 
sudo ./crt_trprt.sh iso +nvmftestinit +nvmfappstart "-m 0xF" -trap "killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid # Use nvmf_create_transport call to create transport $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt null_bdevs="$($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) " null_bdevs+="$($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)" -modprobe -v nvme-rdma - $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 for null_bdev in $null_bdevs; do $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev @@ -72,6 +55,5 @@ fi trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit crt_trprt diff --git a/test/nvmf/target/discovery.sh b/test/nvmf/target/discovery.sh index a499b4212..7171a8da1 100755 --- a/test/nvmf/target/discovery.sh +++ b/test/nvmf/target/discovery.sh @@ -12,39 +12,22 @@ rpc_py="$rootdir/scripts/rpc.py" set -e -# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. -# e.g. sudo ./discovery.sh iso -nvmftestinit - if ! hash nvme; then echo "nvme command not found; skipping discovery test" exit 0 fi -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - timing_enter discovery -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -nvmfpid=$! +# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. +# e.g. sudo ./discovery.sh iso +nvmftestinit +nvmfappstart "-m 0xF" -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt null_bdevs="$($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) " null_bdevs+="$($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)" -modprobe -v nvme-rdma - $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 for null_bdev in $null_bdevs; do $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev @@ -71,6 +54,5 @@ fi trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit discovery diff --git a/test/nvmf/target/filesystem.sh b/test/nvmf/target/filesystem.sh index 9ba548e45..3c14f2444 100755 --- a/test/nvmf/target/filesystem.sh +++ b/test/nvmf/target/filesystem.sh @@ -12,34 +12,20 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter fs_test + # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./filesystem.sh iso nvmftestinit -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter fs_test - for incapsule in 0 4096; do - # Start up the NVMf target in another process - $NVMF_APP -m 0xF & - nvmfpid=$! 
+ nvmfappstart "-m 0xF" - trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - - waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -c $incapsule bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" bdevs+=" $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" - modprobe -v nvme-rdma - $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 for bdev in $bdevs; do $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev diff --git a/test/nvmf/target/fio.sh b/test/nvmf/target/fio.sh index 4e1ce08b8..0bf2852f7 100755 --- a/test/nvmf/target/fio.sh +++ b/test/nvmf/target/fio.sh @@ -12,30 +12,14 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter fio # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./fio.sh iso nvmftestinit +nvmfappstart "-m 0xF" -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter fio -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -nvmfpid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt - malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" # Create a RAID-0 bdev from two malloc bdevs @@ -43,8 +27,6 @@ raid_malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLO raid_malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" $rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$raid_malloc_bdevs" -modprobe -v nvme-rdma - $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 for malloc_bdev in $malloc_bdevs; do $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev" @@ -87,7 +69,7 @@ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true if [ $fio_status -eq 0 ]; then echo "nvmf hotplug test: fio successful - expected failure" nvmfcleanup - killprocess $nvmfpid + nvmftestfini exit 1 else echo "nvmf hotplug test: fio failed as expected" @@ -103,6 +85,5 @@ rm -f ./local-job2-2-verify.state trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit fio diff --git a/test/nvmf/target/multiconnection.sh b/test/nvmf/target/multiconnection.sh index cfa66e32e..739a455bd 100755 --- a/test/nvmf/target/multiconnection.sh +++ b/test/nvmf/target/multiconnection.sh @@ -13,16 +13,11 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter multiconnection # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./multiconnection.sh iso nvmftestinit - -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi +nvmfappstart "-m 0xF" # SoftRoce does not have enough queues available for # multiconnection tests. Detect if we're using software RDMA. 
@@ -32,19 +27,7 @@ if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then NVMF_SUBSYS=1 fi -timing_enter multiconnection -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -pid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $pid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt - -modprobe -v nvme-rdma for i in `seq 1 $NVMF_SUBSYS` do @@ -75,6 +58,5 @@ rm -f ./local-job0-0-verify.state trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $pid nvmftestfini timing_exit multiconnection diff --git a/test/nvmf/target/nmic.sh b/test/nvmf/target/nmic.sh index 7c06a2f0c..634fd76fb 100755 --- a/test/nvmf/target/nmic.sh +++ b/test/nvmf/target/nmic.sh @@ -12,29 +12,15 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter nmic # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./nmic.sh iso nvmftestinit +nvmfappstart "-m 0xF" -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | sed -n 2p) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi -timing_enter nmic -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -pid=$! - -trap "killprocess $pid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $pid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt # Create subsystems $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 @@ -51,15 +37,14 @@ nmic_status=$? if [ $nmic_status -eq 0 ]; then echo " Adding namespace passed - failure expected." - killprocess $pid + nvmfcleanup + nvmftestfini exit 1 else echo " Adding namespace failed - expected result." fi set -e -modprobe -v nvme-rdma - echo "test case2: host connect to nvmf target in multiple paths" if [ ! -z $NVMF_SECOND_TARGET_IP ]; then $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_SECOND_TARGET_IP -s $NVMF_PORT @@ -77,7 +62,5 @@ nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $pid - nvmftestfini timing_exit nmic diff --git a/test/nvmf/target/nvme_cli.sh b/test/nvmf/target/nvme_cli.sh index ca32da4f8..db16f3c1a 100755 --- a/test/nvmf/target/nvme_cli.sh +++ b/test/nvmf/target/nvme_cli.sh @@ -19,33 +19,17 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter nvme_cli # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./nvme_cli.sh iso nvmftestinit +nvmfappstart "-m 0xF" -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter nvme_cli -timing_enter start_nvmf_tgt -$NVMF_APP -m 0xF & -nvmfpid=$! 
- -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1 -modprobe -v nvme-rdma - $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -d SPDK_Controller1 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 @@ -95,7 +79,6 @@ $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini report_test_completion "nvmf_spdk_nvme_cli" timing_exit nvme_cli diff --git a/test/nvmf/target/nvmf_lvol.sh b/test/nvmf/target/nvmf_lvol.sh index 4220e007f..54d3b55a5 100755 --- a/test/nvmf/target/nvmf_lvol.sh +++ b/test/nvmf/target/nvmf_lvol.sh @@ -14,16 +14,11 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter lvol_integrity # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./nvmf_lvol.sh iso nvmftestinit - -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi +nvmfappstart "-m 0x7" # SoftRoce does not have enough queues available for # multiconnection tests. Detect if we're using software RDMA. @@ -33,19 +28,7 @@ if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then SUBSYS_NR=1 fi -timing_enter lvol_integrity -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0x7 & -pid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $pid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt - -modprobe -v nvme-rdma # Construct a RAID volume for the logical volume store base_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " @@ -88,6 +71,5 @@ rm -f ./local-job* trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $pid nvmftestfini timing_exit lvol_integrity diff --git a/test/nvmf/target/rpc.sh b/test/nvmf/target/rpc.sh index 28bf793c7..8d3fd90f0 100755 --- a/test/nvmf/target/rpc.sh +++ b/test/nvmf/target/rpc.sh @@ -9,28 +9,13 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter rpc # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./rpc.sh iso nvmftestinit +nvmfappstart "-m 0xF" -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - -timing_enter rpc -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -pid=$! 
- -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $pid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt # set times for subsystem construct/delete if [ $RUN_NIGHTLY -eq 1 ]; then @@ -52,9 +37,6 @@ done $rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -modprobe -v nvme-rdma -trap "killprocess $pid; nvmfcleanup; exit 1" SIGINT SIGTERM EXIT - # This connect should fail - the host NQN is not allowed ! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" @@ -111,7 +93,6 @@ do done nvmfcleanup -trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT # do frequent add delete. for i in `seq 1 $times` @@ -140,6 +121,5 @@ done trap - SIGINT SIGTERM EXIT -killprocess $pid nvmftestfini timing_exit rpc diff --git a/test/nvmf/target/shutdown.sh b/test/nvmf/target/shutdown.sh index 1f9869eda..4b59fceff 100755 --- a/test/nvmf/target/shutdown.sh +++ b/test/nvmf/target/shutdown.sh @@ -12,17 +12,6 @@ rpc_py="$rootdir/scripts/rpc.py" set -e -# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. -# e.g. sudo ./shutdown.sh iso -nvmftestinit - -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - function waitforio() { # $1 = RPC socket if [ -z "$1" ]; then @@ -48,16 +37,12 @@ function waitforio() { } timing_enter shutdown -timing_enter start_nvmf_tgt -# Start up the NVMf target in another process -$NVMF_APP -m 0xF & -pid=$! +# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. +# e.g. 
sudo ./shutdown.sh iso +nvmftestinit +nvmfappstart "-m 0xF" -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmfcleanup; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $pid $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -timing_exit start_nvmf_tgt num_subsystems=10 # SoftRoce does not have enough queues available for @@ -100,7 +85,7 @@ rm -f /var/run/spdk_bdev1 # Verify the target stays up sleep 1 -kill -0 $pid +kill -0 $nvmfpid # Connect with bdevperf and confirm it works $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock -c $testdir/bdevperf.conf -q 64 -o 65536 -w verify -t 1 @@ -122,7 +107,7 @@ killprocess $perfpid # Verify the target stays up sleep 1 -kill -0 $pid +kill -0 $nvmfpid timing_exit test2 # Test 3: Kill the target unexpectedly with I/O outstanding @@ -135,12 +120,12 @@ waitforlisten $perfpid /var/tmp/bdevperf.sock $rpc_py -s /var/tmp/bdevperf.sock wait_subsystem_init # Expand the trap to clean up bdevperf if something goes wrong -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; kill -9 $perfpid; nvmfcleanup; nvmftestfini; exit 1" SIGINT SIGTERM EXIT +trap "process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid; nvmfcleanup; nvmftestfini; exit 1" SIGINT SIGTERM EXIT waitforio /var/tmp/bdevperf.sock Nvme1n1 # Kill the target half way through -killprocess $pid +killprocess $nvmfpid # Verify bdevperf exits successfully sleep 1 diff --git a/test/nvmf/target/srq_overwhelm.sh b/test/nvmf/target/srq_overwhelm.sh index 45503f936..add4dc46c 100755 --- a/test/nvmf/target/srq_overwhelm.sh +++ b/test/nvmf/target/srq_overwhelm.sh @@ -12,36 +12,21 @@ rpc_py="$rootdir/scripts/rpc.py" set -e +timing_enter srq_overwhelm # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization. # e.g. sudo ./fio.sh iso nvmftestinit -RDMA_IP_LIST=$(get_available_rdma_ips) -NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1) -if [ -z $NVMF_FIRST_TARGET_IP ]; then - echo "no NIC for nvmf test" - exit 0 -fi - if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then echo "Using software RDMA, Likely not enough memory to run this test. aborting." exit 0 fi -timing_enter srq_overwhelm -timing_enter start_nvmf_tgt +nvmfappstart "-m 0xF" -$NVMF_APP -m 0xF & -nvmfpid=$! - -trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT - -waitforlisten $nvmfpid # create the rdma transport with an intentionally small SRQ depth $rpc_py nvmf_create_transport -t RDMA -u 8192 -s 1024 -timing_exit start_nvmf_tgt -modprobe -v nvme-rdma declare -a malloc_bdevs malloc_bdevs[0]="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" malloc_bdevs[1]+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" @@ -77,6 +62,5 @@ done trap - SIGINT SIGTERM EXIT nvmfcleanup -killprocess $nvmfpid nvmftestfini timing_exit srq_overwhelm
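
For reference, a target test script converted by this patch reduces to roughly the shape below. This is a minimal sketch assembled from the hunks above rather than a copy of any single script: the timing label ("my_test" here), core mask, transport parameters, bdev name and subsystem NQN vary per test, and the usual sourcing of test/nvmf/common.sh plus the rpc_py definition at the top of each script is assumed.

    timing_enter my_test
    # pass 'iso' when running the script standalone to trigger rdma device initialization
    nvmftestinit                  # resolves NVMF_FIRST_TARGET_IP; exits early if no RDMA NIC is found
    nvmfappstart "-m 0xF"         # starts nvmf_tgt, installs the cleanup trap, waits for its
                                  # RPC socket and modprobes nvme-rdma

    $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4

    $rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
    $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
    $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
    $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

    nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
    # ... test body (fio, bdevperf, nvme-cli, etc.) ...
    nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true

    trap - SIGINT SIGTERM EXIT
    nvmfcleanup                   # unchanged helper
    nvmftestfini                  # now also kills the nvmf target (and resets devices in 'iso' mode)
    timing_exit my_test

The startup trap is installed inside nvmfappstart(), and because nvmftestfini() kills the target itself, each script only needs to clear the trap and call the two fini helpers at the end.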