From 49de02ec4d4166791d0d0f8f4c5982eb9eec2d8c Mon Sep 17 00:00:00 2001
From: Tomasz Zawadzki
Date: Thu, 27 Feb 2020 11:58:44 -0500
Subject: [PATCH] Revert "test/hotremove: Select test cases to be run for scci and blk hotremove."

This reverts commit 29e9fdc8572acc3fa70397abac4d8d47904c6637.

Nightly tests run after the reverted patch was merged show that it does
not pass in its current state. It also affected per-patch tests, since
the new tests were not added to the skipped_tests list.

Signed-off-by: Tomasz Zawadzki
Change-Id: If8a58cbe9ecd0e58f20c0a9ee844bc9a8ee046a3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1045
Reviewed-by: Jim Harris
Reviewed-by: Seth Howell
Reviewed-by: Ben Walker
Tested-by: SPDK CI Jenkins
---
 test/vhost/hotplug/blk_hotremove.sh  | 59 +++++++++++-----------
 test/vhost/hotplug/common.sh         | 35 -----------------
 test/vhost/hotplug/scsi_hotremove.sh | 57 +++++++++++----------------
 test/vhost/manual.sh                 | 16 ++------
 test/vhost/vhost.sh                  |  6 ---
 5 files changed, 47 insertions(+), 126 deletions(-)

diff --git a/test/vhost/hotplug/blk_hotremove.sh b/test/vhost/hotplug/blk_hotremove.sh
index 2b90db1ab..e2715b814 100644
--- a/test/vhost/hotplug/blk_hotremove.sh
+++ b/test/vhost/hotplug/blk_hotremove.sh
@@ -44,11 +44,11 @@
 function blk_hotremove_tc1() {
     echo "Blk hotremove test case 1"
     traddr=""
     # 1. Run the command to hot remove NVMe disk.
-    delete_nvme $hotnvmename
+    get_traddr "Nvme0"
+    delete_nvme "Nvme0"
     # 2. If vhost had crashed then tests would stop running
     sleep 1
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme0" "$traddr"
     sleep 1
 }
@@ -56,7 +56,7 @@
 function blk_hotremove_tc2() {
     echo "Blk hotremove test case 2"
     # 1. Use rpc command to create blk controllers.
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -72,7 +72,7 @@
     local last_pid=$!
     sleep 3
     # 4. Run the command to hot remove NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme0"
     local retcode=0
     wait_for_finish $last_pid || retcode=$?
     # 5. Check that fio job run on hot-removed device stopped.
@@ -90,8 +90,7 @@
     check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
     vm_shutdown_all
     vhost_delete_controllers
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme1" "$traddr"
     sleep 1
 }
@@ -99,9 +98,9 @@
 function blk_hotremove_tc3() {
     echo "Blk hotremove test case 3"
     # 1. Use rpc command to create blk controllers.
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
     # 2. Run two VMs and attach every VM to two blk controllers.
     vm_run_with_arg "0 1"
@@ -115,7 +114,7 @@
     local last_pid=$!
     sleep 3
     # 4. Run the command to hot remove of first NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme1"
     local retcode=0
     wait_for_finish $last_pid || retcode=$?
     # 6. Check that fio job run on hot-removed device stopped.
@@ -133,8 +132,7 @@
     check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
     vm_shutdown_all
     vhost_delete_controllers
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme2" "$traddr"
     sleep 1
 }
@@ -142,9 +140,9 @@
 function blk_hotremove_tc4() {
     echo "Blk hotremove test case 4"
     # 1. Use rpc command to create blk controllers.
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
     # 2. Run two VM, attached to blk controllers.
     vm_run_with_arg "0 1"
@@ -163,7 +161,7 @@
     sleep 3
     prepare_fio_cmd_tc1 "0 1"
     # 5. Run the command to hot remove of first NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme2"
     local retcode_vm0=0
     local retcode_vm1=0
     wait_for_finish $last_pid_vm0 || retcode_vm0=$?
@@ -185,8 +183,7 @@
     vm_shutdown_all
     vhost_delete_controllers
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme3" "$traddr"
     sleep 1
 }
@@ -194,7 +191,7 @@
 function blk_hotremove_tc5() {
     echo "Blk hotremove test case 5"
     # 1. Use rpc command to create blk controllers.
-    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
     $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -208,7 +205,7 @@
     local last_pid=$!
     sleep 3
     # 4. Run the command to hot remove of first NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme3"
     local retcode=0
     wait_for_finish $last_pid || retcode=$?
     # 5. Check that fio job run on hot-removed device stopped.
@@ -226,25 +223,13 @@
     check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
     vm_shutdown_all
     vhost_delete_controllers
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme4" "$traddr"
     sleep 1
 }
 
 vms_setup
-get_traddr "Nvme0"
-if $tc1; then
-    blk_hotremove_tc1
-fi
-if $tc2; then
-    blk_hotremove_tc2
-fi
-if $tc3; then
-    blk_hotremove_tc3
-fi
-if $tc4; then
-    blk_hotremove_tc4
-fi
-if $tc5; then
-    blk_hotremove_tc5
-fi
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
diff --git a/test/vhost/hotplug/common.sh b/test/vhost/hotplug/common.sh
index 67688e014..adf7cd44b 100644
--- a/test/vhost/hotplug/common.sh
+++ b/test/vhost/hotplug/common.sh
@@ -16,7 +16,6 @@
 x=""
 scsi_hot_remove_test=0
 blk_hot_remove_test=0
 readonly=""
-test_cases="all"
 
 function usage() {
@@ -39,9 +38,6 @@ function usage() {
     echo "    DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
     echo "    --scsi-hotremove-test  Run scsi hotremove tests"
     echo "    --readonly             Use readonly for fio"
-    echo "    --blk-hotremove-test   Run blk hotremove tests."
-    echo "    --test-cases=[num]     Run comma separated test cases. Assign all if all test cases should be run"
-    echo "                           Default value is all"
     exit 0
 }
@@ -57,7 +53,6 @@ while getopts 'xh-:' optchar; do
             scsi-hotremove-test) scsi_hot_remove_test=1 ;;
             blk-hotremove-test) blk_hot_remove_test=1 ;;
             readonly) readonly="--readonly" ;;
-            test-cases=*) test_cases="${OPTARG#*=}" ;;
             *) usage $0 "Invalid argument '$OPTARG'" ;;
         esac
         ;;
@@ -69,36 +64,6 @@ while getopts 'xh-:' optchar; do
 done
 shift $(( OPTIND - 1 ))
 
-if [ ${test_cases} == "all" ]; then
-    test_cases="1,2,3,4,5"
-fi
-tc1=false
-tc2=false
-tc3=false
-tc4=false
-tc5=false
-IFS=',' read -ra tc <<< "${test_cases}"
-for i in "${tc[@]}"; do
-    if [ $i == "1" ]; then
-        tc1=true
-    elif [ $i == "2" ]; then
-        tc2=true
-    elif [ $i == "3" ]; then
-        tc3=true
-    elif [ $i == "4" ]; then
-        tc4=true
-    elif [ $i == "5" ]; then
-        tc5=true
-    fi
-done
-
-hotnvmenumber=0
-hotnvmename="Nvme0"
-function set_hotnvmename() {
-    hotnvmename="HotInNvme${hotnvmenumber}"
-    hotnvmenumber=$((hotnvmenumber + 1))
-}
-
 fio_job=$testdir/fio_jobs/default_integrity.job
 tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
 tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp
diff --git a/test/vhost/hotplug/scsi_hotremove.sh b/test/vhost/hotplug/scsi_hotremove.sh
index 400b0dd24..c234c4d2e 100644
--- a/test/vhost/hotplug/scsi_hotremove.sh
+++ b/test/vhost/hotplug/scsi_hotremove.sh
@@ -43,21 +43,23 @@
 function scsi_hotremove_tc1() {
     traddr=""
     get_traddr "Nvme0"
     # 1. Run the command to hot remove NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "Nvme0"
     # 2. If vhost had crashed then tests would stop running
     sleep 1
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme0" "$traddr"
 }
 
 # Test Case 2
 function scsi_hotremove_tc2() {
     echo "Scsi hotremove test case 2"
     # 1. Attach split NVMe bdevs to scsi controller.
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
     # 2. Run two VMs, attached to scsi controllers.
+    vms_setup
     vm_run_with_arg 0 1
     vms_prepare "0 1"
@@ -72,7 +74,7 @@
     local last_pid=$!
     sleep 3
     # 4. Run the command to hot remove NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme0"
     # 5. Check that fio job run on hot-remove device stopped on VM.
     # Expected: Fio should return error message and return code != 0.
@@ -93,8 +95,7 @@
     # Expected: Fio should return error message and return code != 0.
     check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
     vm_shutdown_all
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme1" "$traddr"
     sleep 1
 }
@@ -102,7 +103,7 @@
 function scsi_hotremove_tc3() {
     echo "Scsi hotremove test case 3"
     # 1. Attach added NVMe bdev to scsi controller.
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
     # 2. Run two VM, attached to scsi controllers.
     vm_run_with_arg 0 1
     vms_prepare "0 1"
@@ -116,7 +117,7 @@
     local last_pid=$!
     sleep 3
     # 4. Run the command to hot remove NVMe disk.
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme1"
     # 5. Check that fio job run on hot-remove device stopped on first VM.
     # Expected: Fio should return error message and return code != 0.
     wait_for_finish $last_pid || retcode=$?
@@ -135,8 +136,7 @@
     # Expected: Fio should return error message and return code != 0.
     check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
     vm_shutdown_all
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme2" "$traddr"
     sleep 1
 }
@@ -144,8 +144,8 @@
 function scsi_hotremove_tc4() {
     echo "Scsi hotremove test case 4"
     # 1. Attach NVMe bdevs to scsi controllers.
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
     # 2. Run two VMs, attach to scsi controller.
     vm_run_with_arg 0 1
     vms_prepare "0 1"
@@ -168,7 +168,7 @@
     # 5. Run the command to hot remove NVMe disk.
     traddr=""
     get_traddr "Nvme0"
-    delete_nvme $hotnvmename
+    delete_nvme "HotInNvme2"
     # 6. Check that fio job run on hot-removed devices stopped.
     # Expected: Fio should return error message and return code != 0.
     local retcode_vm0=0
@@ -205,9 +205,10 @@
     # Expected: Fio should return return code == 0.
     check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
     vm_shutdown_all
-    set_hotnvmename
-    add_nvme $hotnvmename "$traddr"
+    add_nvme "HotInNvme3" "$traddr"
     sleep 1
+    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
 }
 
 function pre_scsi_hotremove_test_case() {
@@ -215,13 +216,9 @@
     $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
     $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
     $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
-    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
 }
 
 function post_scsi_hotremove_test_case() {
-    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
-    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
     $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
     $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
     $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
@@ -229,18 +226,8 @@
 }
 
 pre_scsi_hotremove_test_case
-vms_setup
-if $tc1; then
-    scsi_hotremove_tc1
-fi
-if $tc2; then
-    scsi_hotremove_tc2
-fi
-if $tc3; then
-    scsi_hotremove_tc3
-fi
-if $tc4; then
-    scsi_hotremove_tc4
-fi
-sleep 1
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
 post_scsi_hotremove_test_case
diff --git a/test/vhost/manual.sh b/test/vhost/manual.sh
index 16ec1ae18..02137bed6 100755
--- a/test/vhost/manual.sh
+++ b/test/vhost/manual.sh
@@ -19,7 +19,6 @@ case $1 in
     echo "    -shr|--scsi-hot-remove  for running scsi hot remove tests"
     echo "    -bhr|--blk-hot-remove   for running blk hot remove tests"
     echo "    -h |--help              prints this message"
-    echo "    -tc|--test-cases        define test cases to run for hotremove test"
     echo ""
     echo "Environment:"
     echo "    VM_IMAGE  path to QCOW2 VM image used during test (default: $HOME/vhost_vm_image.qcow2)"
@@ -51,13 +50,6 @@
 DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
 WORKDIR=$(readlink -f $(dirname $0))
 
-test_cases="all"
-if [ -n "$2" ]; then
-    case $2 in
-        -tc=*|-test-cases=*) test_cases="${2#*=}" ;;
-    esac
-fi
-
 case $1 in
     -hp|--hotplug)
         echo 'Running hotplug tests suite...'
@@ -76,8 +68,7 @@
             --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
             --test-type=spdk_vhost_scsi \
             --scsi-hotremove-test \
-            --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
-            --test-cases=$test_cases
+            --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
         ;;
     -bhr|--blk-hot-remove)
         echo 'Running blk hotremove tests suite...'
@@ -86,9 +77,8 @@
             --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
             --test-type=spdk_vhost_blk \
             --blk-hotremove-test \
-            --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
-            --test-cases=$test_cases
-            ;;
+            --fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
+        ;;
     *)
         echo "unknown test type: $1"
         exit 1
diff --git a/test/vhost/vhost.sh b/test/vhost/vhost.sh
index 773947c2c..9015efb3b 100755
--- a/test/vhost/vhost.sh
+++ b/test/vhost/vhost.sh
@@ -94,12 +94,6 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
     echo 'Running migration suite...'
     run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
         --fio-bin=$FIO_BIN --os=$VM_IMAGE
-
-    echo "Running scsi hotremove test"
-    run_test "scsi_hotremove" $WORKDIR/vhost/manual.sh -shr --test-cases=1,2,3,4
-
-    echo "Running blk hotremove test"
-    run_test "blk_hotremove" $WORKDIR/vhost/manual.sh -bhr --test-cases=1,2,3,4,5
 fi
 
 echo 'Running lvol integrity suite...'