Revert "test/hotremove: Select test cases to be run for scci and blk hotremove."
This reverts commit 29e9fdc857.
Nightly tests run after merging the reverted patch show that it does not
pass in its current state. It also affected per-patch testing, since the
new tests were not added to the skipped_tests list.
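
The skipped_tests mechanism referenced here is a per-patch CI guard: new suites stay on the skip list until they have proven stable in nightly runs. A minimal sketch of such a guard, reusing the suite's run_test helper; the file path and matching logic are illustrative assumptions, not SPDK's actual CI code:

    # Hypothetical per-patch guard; path and matching are illustrative only.
    skipped_tests=$(cat "$rootdir/test/common/skipped_tests" 2> /dev/null || true)

    function run_test_unless_skipped() {
        local name=$1
        shift
        if [[ $skipped_tests == *"$name"* ]]; then
            echo "Skipping $name in per-patch run"
            return 0
        fi
        run_test "$name" "$@"
    }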
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: If8a58cbe9ecd0e58f20c0a9ee844bc9a8ee046a3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1045
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent dad9d17406
commit 49de02ec4d
@@ -44,11 +44,11 @@ function blk_hotremove_tc1() {
 	echo "Blk hotremove test case 1"
 	traddr=""
 	# 1. Run the command to hot remove NVMe disk.
-	delete_nvme $hotnvmename
+	get_traddr "Nvme0"
+	delete_nvme "Nvme0"
 	# 2. If vhost had crashed then tests would stop running
 	sleep 1
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme0" "$traddr"
 	sleep 1
 }
 
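The hunk above restores the hard-coded hot-remove cycle: remember the device's PCI address, detach it, confirm vhost survives, then re-attach under a fresh bdev name. As a sketch of what the helpers reduce to, assuming get_traddr/delete_nvme/add_nvme wrap the bdev_nvme_* RPCs (the wrappers themselves are outside this diff):

    # Sketch of the hot-remove cycle behind the helpers (assumed wrappers).
    rpc_py="scripts/rpc.py"                    # illustrative invocation
    $rpc_py bdev_nvme_detach_controller Nvme0  # delete_nvme "Nvme0"
    sleep 1                                    # vhost must survive the removal
    # add_nvme "HotInNvme0" "$traddr": a fresh name avoids clashing with
    # stale Nvme0n1p* references still held by the vhost controllers.
    $rpc_py bdev_nvme_attach_controller -b HotInNvme0 -t PCIe -a "$traddr"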
@@ -56,7 +56,7 @@ function blk_hotremove_tc1() {
 function blk_hotremove_tc2() {
 	echo "Blk hotremove test case 2"
 	# 1. Use rpc command to create blk controllers.
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -72,7 +72,7 @@ function blk_hotremove_tc2() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme0"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 5. Check that fio job run on hot-removed device stopped.
@@ -90,8 +90,7 @@ function blk_hotremove_tc2() {
 	check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
 	vm_shutdown_all
 	vhost_delete_controllers
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme1" "$traddr"
 	sleep 1
 }
 
@@ -99,9 +98,9 @@ function blk_hotremove_tc2() {
 function blk_hotremove_tc3() {
 	echo "Blk hotremove test case 3"
 	# 1. Use rpc command to create blk controllers.
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
 	# 2. Run two VMs and attach every VM to two blk controllers.
 	vm_run_with_arg "0 1"
@@ -115,7 +114,7 @@ function blk_hotremove_tc3() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove of first NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme1"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 6. Check that fio job run on hot-removed device stopped.
@@ -133,8 +132,7 @@ function blk_hotremove_tc3() {
 	check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
 	vm_shutdown_all
 	vhost_delete_controllers
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme2" "$traddr"
 	sleep 1
 }
 
@@ -142,9 +140,9 @@ function blk_hotremove_tc3() {
 function blk_hotremove_tc4() {
 	echo "Blk hotremove test case 4"
 	# 1. Use rpc command to create blk controllers.
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
 	# 2. Run two VM, attached to blk controllers.
 	vm_run_with_arg "0 1"
@@ -163,7 +161,7 @@ function blk_hotremove_tc4() {
 	sleep 3
 	prepare_fio_cmd_tc1 "0 1"
 	# 5. Run the command to hot remove of first NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme2"
 	local retcode_vm0=0
 	local retcode_vm1=0
 	wait_for_finish $last_pid_vm0 || retcode_vm0=$?
@@ -185,8 +183,7 @@ function blk_hotremove_tc4() {
 
 	vm_shutdown_all
 	vhost_delete_controllers
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme3" "$traddr"
 	sleep 1
 }
 
@@ -194,7 +191,7 @@ function blk_hotremove_tc4() {
 function blk_hotremove_tc5() {
 	echo "Blk hotremove test case 5"
 	# 1. Use rpc command to create blk controllers.
-	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
+	$rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
 	$rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -208,7 +205,7 @@ function blk_hotremove_tc5() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove of first NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme3"
 	local retcode=0
 	wait_for_finish $last_pid || retcode=$?
 	# 5. Check that fio job run on hot-removed device stopped.
@@ -226,25 +223,13 @@ function blk_hotremove_tc5() {
 	check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
 	vm_shutdown_all
 	vhost_delete_controllers
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme4" "$traddr"
 	sleep 1
 }
 
 vms_setup
-get_traddr "Nvme0"
-if $tc1; then
-	blk_hotremove_tc1
-fi
-if $tc2; then
-	blk_hotremove_tc2
-fi
-if $tc3; then
-	blk_hotremove_tc3
-fi
-if $tc4; then
-	blk_hotremove_tc4
-fi
-if $tc5; then
-	blk_hotremove_tc5
-fi
+blk_hotremove_tc1
+blk_hotremove_tc2
+blk_hotremove_tc3
+blk_hotremove_tc4
+blk_hotremove_tc5
@@ -16,7 +16,6 @@ x=""
 scsi_hot_remove_test=0
 blk_hot_remove_test=0
 readonly=""
-test_cases="all"
 
 
 function usage() {
@@ -39,9 +38,6 @@ function usage() {
 	echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
 	echo " --scsi-hotremove-test Run scsi hotremove tests"
 	echo " --readonly Use readonly for fio"
-	echo " --blk-hotremove-test Run blk hotremove tests."
-	echo " --test-cases=[num] Run comma separated test cases. Assign all if all test cases should be run"
-	echo " Default value is all"
 	exit 0
 }
 
@@ -57,7 +53,6 @@ while getopts 'xh-:' optchar; do
 			scsi-hotremove-test) scsi_hot_remove_test=1 ;;
 			blk-hotremove-test) blk_hot_remove_test=1 ;;
 			readonly) readonly="--readonly" ;;
-			test-cases=*) test_cases="${OPTARG#*=}" ;;
 			*) usage $0 "Invalid argument '$OPTARG'" ;;
 		esac
 		;;
@@ -69,36 +64,6 @@ while getopts 'xh-:' optchar; do
 done
 shift $(( OPTIND - 1 ))
 
-if [ ${test_cases} == "all" ]; then
-	test_cases="1,2,3,4,5"
-fi
-tc1=false
-tc2=false
-tc3=false
-tc4=false
-tc5=false
-IFS=',' read -ra tc <<< "${test_cases}"
-for i in "${tc[@]}"; do
-	if [ $i == "1" ]; then
-		tc1=true
-	elif [ $i == "2" ]; then
-		tc2=true
-	elif [ $i == "3" ]; then
-		tc3=true
-	elif [ $i == "4" ]; then
-		tc4=true
-	elif [ $i == "5" ]; then
-		tc5=true
-	fi
-done
-
-hotnvmenumber=0
-hotnvmename="Nvme0"
-function set_hotnvmename() {
-	hotnvmename="HotInNvme${hotnvmenumber}"
-	hotnvmenumber=$((hotnvmenumber + 1))
-}
-
 fio_job=$testdir/fio_jobs/default_integrity.job
 tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
 tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp

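The block removed above is the core of the reverted feature: split the --test-cases list into per-case flags, and generate sequential HotInNvmeN names so each re-attached controller gets a unique bdev name. The same two patterns as a self-contained sketch, runnable outside the suite (illustrative, not a drop-in replacement):

    #!/usr/bin/env bash
    # Parse a comma-separated case list into per-case flags.
    test_cases=${1:-all}
    [[ $test_cases == "all" ]] && test_cases="1,2,3,4,5"
    declare -A enabled=()
    IFS=',' read -ra tc <<< "$test_cases"
    for i in "${tc[@]}"; do
        enabled[$i]=true
    done

    # Generate sequential bdev names, as the removed set_hotnvmename did.
    hotnvmenumber=0
    hotnvmename="Nvme0"
    function set_hotnvmename() {
        hotnvmename="HotInNvme${hotnvmenumber}"
        hotnvmenumber=$((hotnvmenumber + 1))
    }

    for case_num in 1 2 3 4 5; do
        if [[ ${enabled[$case_num]:-false} == true ]]; then
            set_hotnvmename
            echo "case $case_num would re-attach the disk as $hotnvmename"
        fi
    done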
@@ -43,21 +43,23 @@ function scsi_hotremove_tc1() {
 	traddr=""
 	get_traddr "Nvme0"
 	# 1. Run the command to hot remove NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "Nvme0"
 	# 2. If vhost had crashed then tests would stop running
 	sleep 1
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme0" "$traddr"
 }
 
 # Test Case 2
 function scsi_hotremove_tc2() {
 	echo "Scsi hotremove test case 2"
 	# 1. Attach split NVMe bdevs to scsi controller.
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
 
 	# 2. Run two VMs, attached to scsi controllers.
+	vms_setup
 	vm_run_with_arg 0 1
 	vms_prepare "0 1"
 
@@ -72,7 +74,7 @@ function scsi_hotremove_tc2() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme0"
 
 	# 5. Check that fio job run on hot-remove device stopped on VM.
 	# Expected: Fio should return error message and return code != 0.
@@ -93,8 +95,7 @@ function scsi_hotremove_tc2() {
 	# Expected: Fio should return error message and return code != 0.
 	check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
 	vm_shutdown_all
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme1" "$traddr"
 	sleep 1
 }
 
@@ -102,7 +103,7 @@ function scsi_hotremove_tc2() {
 function scsi_hotremove_tc3() {
 	echo "Scsi hotremove test case 3"
 	# 1. Attach added NVMe bdev to scsi controller.
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
 	# 2. Run two VM, attached to scsi controllers.
 	vm_run_with_arg 0 1
 	vms_prepare "0 1"
@@ -116,7 +117,7 @@ function scsi_hotremove_tc3() {
 	local last_pid=$!
 	sleep 3
 	# 4. Run the command to hot remove NVMe disk.
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme1"
 	# 5. Check that fio job run on hot-remove device stopped on first VM.
 	# Expected: Fio should return error message and return code != 0.
 	wait_for_finish $last_pid || retcode=$?
@@ -135,8 +136,7 @@ function scsi_hotremove_tc3() {
 	# Expected: Fio should return error message and return code != 0.
 	check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
 	vm_shutdown_all
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme2" "$traddr"
 	sleep 1
 }
 
@@ -144,8 +144,8 @@ function scsi_hotremove_tc3() {
 function scsi_hotremove_tc4() {
 	echo "Scsi hotremove test case 4"
 	# 1. Attach NVMe bdevs to scsi controllers.
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
+	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
 	# 2. Run two VMs, attach to scsi controller.
 	vm_run_with_arg 0 1
 	vms_prepare "0 1"
@@ -168,7 +168,7 @@ function scsi_hotremove_tc4() {
 	# 5. Run the command to hot remove NVMe disk.
 	traddr=""
 	get_traddr "Nvme0"
-	delete_nvme $hotnvmename
+	delete_nvme "HotInNvme2"
 	# 6. Check that fio job run on hot-removed devices stopped.
 	# Expected: Fio should return error message and return code != 0.
 	local retcode_vm0=0
@@ -205,9 +205,10 @@ function scsi_hotremove_tc4() {
 	# Expected: Fio should return return code == 0.
 	check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
 	vm_shutdown_all
-	set_hotnvmename
-	add_nvme $hotnvmename "$traddr"
+	add_nvme "HotInNvme3" "$traddr"
 	sleep 1
+	$rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
+	$rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
 }
 
 function pre_scsi_hotremove_test_case() {
@@ -215,13 +216,9 @@ function pre_scsi_hotremove_test_case() {
 	$rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
 	$rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
 	$rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
-	$rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
 }
 
 function post_scsi_hotremove_test_case() {
-	$rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
-	$rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
 	$rpc_py vhost_delete_controller naa.Nvme0n1p0.0
 	$rpc_py vhost_delete_controller naa.Nvme0n1p1.0
 	$rpc_py vhost_delete_controller naa.Nvme0n1p2.1
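Note the companion moves in this file: the Mallocp0/Mallocp1 targets leave the pre/post fixtures and land in the test cases themselves (tc2 adds them, tc4 removes them). The invariant being preserved is that every add_target is paired with a remove_target before the controller is deleted; schematically:

    # Add/remove pairing for a vhost-scsi target (slot 0), as used above.
    $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
    # ... run fio against the target ...
    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
    $rpc_py vhost_delete_controller naa.Nvme0n1p1.0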
@@ -229,18 +226,8 @@ function post_scsi_hotremove_test_case() {
 }
 
 pre_scsi_hotremove_test_case
-vms_setup
-if $tc1; then
-	scsi_hotremove_tc1
-fi
-if $tc2; then
-	scsi_hotremove_tc2
-fi
-if $tc3; then
-	scsi_hotremove_tc3
-fi
-if $tc4; then
-	scsi_hotremove_tc4
-fi
-sleep 1
+scsi_hotremove_tc1
+scsi_hotremove_tc2
+scsi_hotremove_tc3
+scsi_hotremove_tc4
 post_scsi_hotremove_test_case

@@ -19,7 +19,6 @@ case $1 in
 	echo " -shr|--scsi-hot-remove for running scsi hot remove tests"
 	echo " -bhr|--blk-hot-remove for running blk hot remove tests"
 	echo " -h |--help prints this message"
-	echo " -tc|--test-cases define test cases to run for hotremove test"
 	echo ""
 	echo "Environment:"
 	echo " VM_IMAGE path to QCOW2 VM image used during test (default: $HOME/vhost_vm_image.qcow2)"
@@ -51,13 +50,6 @@ DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"
 
 WORKDIR=$(readlink -f $(dirname $0))
 
-test_cases="all"
-if [ -n "$2" ]; then
-	case $2 in
-		-tc=*|-test-cases=*) test_cases="${2#*=}" ;;
-	esac
-fi
-
 case $1 in
 	-hp|--hotplug)
 		echo 'Running hotplug tests suite...'
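With this hunk reverted, manual.sh no longer inspects a second argument; invocation is just the mode flag. An illustrative before/after, assuming the script is run from its own directory:

    # Before the revert: run only scsi hotremove cases 1 and 2.
    ./manual.sh -shr -tc=1,2
    # After the revert: the mode flag alone; the full suite always runs.
    ./manual.sh -shr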
@@ -76,8 +68,7 @@ case $1 in
 		--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 		--test-type=spdk_vhost_scsi \
 		--scsi-hotremove-test \
-		--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
-		--test-cases=$test_cases
+		--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
 		;;
 	-bhr|--blk-hot-remove)
 		echo 'Running blk hotremove tests suite...'
@@ -86,9 +77,8 @@ case $1 in
 		--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
 		--test-type=spdk_vhost_blk \
 		--blk-hotremove-test \
-		--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
-		--test-cases=$test_cases
+		--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
 		;;
 	*)
 		echo "unknown test type: $1"
 		exit 1
@@ -94,12 +94,6 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
 	echo 'Running migration suite...'
 	run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
 		--fio-bin=$FIO_BIN --os=$VM_IMAGE
-
-	echo "Running scsi hotremove test"
-	run_test "scsi_hotremove" $WORKDIR/vhost/manual.sh -shr --test-cases=1,2,3,4
-
-	echo "Running blk hotremove test"
-	run_test "blk_hotremove" $WORKDIR/vhost/manual.sh -bhr --test-cases=1,2,3,4,5
 fi
 
 echo 'Running lvol integrity suite...'
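This final hunk drops both hotremove suites from the nightly runner altogether. Assuming the $WORKDIR layout referenced above, they can still be launched by hand after the revert:

    # Manual invocation after the revert (paths follow the $WORKDIR/vhost
    # reference above; adjust to the local checkout).
    $WORKDIR/vhost/manual.sh -shr   # scsi hotremove suite
    $WORKDIR/vhost/manual.sh -bhr   # blk hotremove suite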