test/vhost: Scsi hot remove tests

Change-Id: I7dac919f0ba331fe6a78855c0171c0798d48fade
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/392144
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Pawel Kaminski 2017-12-18 16:02:24 +01:00 committed by Daniel Verkamp
parent 0f80c44447
commit 576f8ed256
6 changed files with 253 additions and 22 deletions

View File

@ -1,8 +1,3 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
dry_run=false
no_shutdown=false
fio_bin="fio"
@ -13,6 +8,7 @@ vms=()
used_vms=""
disk_split=""
x=""
scsi_hot_remove_test=0
function usage() {
@ -34,6 +30,7 @@ function usage() {
echo " NUM - VM number (mandatory)"
echo " OS - VM os disk path (optional)"
echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
echo " --scsi-hotremove-test Run scsi hotremove tests"
exit 0
}
@ -47,6 +44,7 @@ while getopts 'xh-:' optchar; do
fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
test-type=*) test_type="${OPTARG#*=}" ;;
vm=*) vms+=("${OPTARG#*=}") ;;
scsi-hotremove-test) scsi_hot_remove_test=1 ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
@ -108,11 +106,14 @@ function vms_setup() {
done
}
function vm_run_with_arg() {
	# Start the given VMs and block until they finish booting.
	# Arguments: list of VM numbers, forwarded verbatim to vm_run and
	# vm_wait_for_boot (600 s boot timeout).
	# Fix: quote "$@" so each VM number is passed as its own word even if
	# a caller ever supplies an argument containing whitespace (SC2068).
	vm_run "$@"
	vm_wait_for_boot 600 "$@"
}
function vms_setup_and_run() {
vms_setup
# Run everything
vm_run $used_vms
vm_wait_for_boot 600 $used_vms
vm_run_with_arg $@
}
function vms_prepare() {
@ -163,6 +164,49 @@ function check_fio_retcode() {
}
function reboot_all_and_prepare() {
vms_reboot_all $1
vms_prepare $1
vms_reboot_all "$1"
vms_prepare "$1"
}
function post_test_case() {
# Common teardown: power off all test VMs first, then stop the vhost app.
# Order matters — VMs must release their vhost sockets before the target
# is killed. Both helpers are defined in the sourced common scripts.
vm_shutdown_all
spdk_vhost_kill
}
function on_error_exit() {
	# ERR-trap handler: report where the failure happened, run the common
	# teardown, print a backtrace and exit non-zero.
	# $1 - name of the failing function, $2 - failing line number
	# (both supplied by the ERR trap installed by the caller).
	set +e # keep tearing down even if cleanup steps fail
	# Fix: diagnostics belong on stderr, not stdout.
	echo "Error on $1 - $2" >&2
	post_test_case
	print_backtrace
	exit 1
}
function check_disks() {
	# Verify that a hot-remove actually changed the disk list seen by the VM.
	# $1 - disk list before removal, $2 - disk list after removal.
	# Exits the script with status 1 when the two lists are identical.
	local before="$1"
	local after="$2"
	if [[ "$before" == "$after" ]]; then
		echo "Disk has not been deleted"
		exit 1
	fi
}
function get_traddr() {
	# Find the PCI transport address of the named NVMe controller and store
	# it in the global $traddr (read later by add_nvme callers).
	# $1 - controller name to match, e.g. "Nvme0".
	# Scans the output of gen_nvme.sh for a TransportID line mentioning the
	# controller and extracts the traddr:... token from it.
	local nvme_name=$1
	local nvme
	# Split declaration from assignment so the command's exit status is
	# not masked by 'local'.
	nvme="$( $SPDK_BUILD_DIR/scripts/gen_nvme.sh )"
	local line word
	while read -r line; do
		if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
			# Deliberate word splitting of $line into whitespace tokens.
			for word in $line; do
				if [[ $word == *"traddr"* ]]; then
					# Strip the first "traddr:" prefix and the first stray
					# double quote via parameter expansion — same result as
					# the previous 'echo | sed | sed' pipeline, without
					# spawning two processes per token.
					word=${word/traddr:/}
					traddr=${word/\"/}
				fi
			done
		fi
	done <<< "$nvme"
}
function delete_nvme() {
# Hot-remove: destroy the named NVMe bdev over RPC so vhost controllers
# using it observe the device disappearing.
# $1 - bdev name, e.g. "HotInNvme0n1".
# NOTE(review): $rpc_py is left unquoted on purpose — it presumably expands
# to an interpreter plus script path and relies on word splitting; confirm
# in the sourced common.sh before changing.
$rpc_py delete_bdev $1
}
function add_nvme() {
	# (Re-)attach an NVMe controller over RPC at a given PCI address.
	# $1 - bdev name to create (e.g. "HotInNvme1")
	# $2 - PCI transport address, as produced by get_traddr
	local bdev_name=$1
	local pci_traddr=$2
	# $rpc_py and the arguments stay unquoted, matching the file's calling
	# convention for RPC helpers.
	$rpc_py construct_nvme_bdev -b $bdev_name -t PCIe -a $pci_traddr
}

View File

@ -12,4 +12,5 @@ rw=randwrite
do_verify=1
verify=md5
verify_backlog=1024
time_based=1
runtime=10

View File

@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
. $BASE_DIR/common.sh
# Add split section into vhost config
@ -11,6 +9,10 @@ function gen_config() {
cat << END_OF_CONFIG >> $BASE_DIR/vhost.conf.in
[Split]
Split Nvme0n1 16
Split Nvme1n1 20
Split HotInNvme0n1 2
Split HotInNvme1n1 2
Split HotInNvme2n1 2
END_OF_CONFIG
}
@ -61,13 +63,18 @@ trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
gen_config
run_vhost
rm $BASE_DIR/vhost.conf.in
pre_hot_attach_detach_test_case
$BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
first_script=$!
$BASE_DIR/scsi_hotdetach.sh --fio-bin=$fio_bin &
second_script=$!
wait $first_script
wait $second_script
vm_shutdown_all
clear_vhost_config
spdk_vhost_kill
if [[ $scsi_hot_remove_test == 0 ]]; then
pre_hot_attach_detach_test_case
$BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
first_script=$!
$BASE_DIR/scsi_hotdetach.sh --fio-bin=$fio_bin &
second_script=$!
wait $first_script
wait $second_script
vm_shutdown_all
clear_vhost_config
fi
if [[ $scsi_hot_remove_test == 1 ]]; then
source $BASE_DIR/scsi_hotremove.sh
fi
post_test_case

View File

@ -0,0 +1,166 @@
set -xe
function prepare_fio_cmd_tc1() {
# Build the global $run_fio client command line for the given VMs and
# upload a per-VM fio job file targeting every SCSI disk inside each VM.
# $1 - space-separated list of VM numbers (deliberate word splitting).
# Side effects: overwrites globals $run_fio and $vm_dir; $SCSI_DISK is
# presumably populated by vm_check_scsi_location — confirm in common.sh.
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
# Start from the template job file and append one section per disk.
cp $fio_job $tmp_detach_job
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
for disk in $SCSI_DISK; do
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
echo "size=100%" >> $tmp_detach_job
done
# Push the generated job into the VM and register this VM as a fio client.
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_2discs.job
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_2discs.job "
rm $tmp_detach_job
done
}
function scsi_hotremove_tc1() {
# Test case 1: hot-remove an NVMe bdev while no VM is running against it,
# then re-attach the controller under a new name at the same PCI address.
echo "Scsi hotremove test case 1"
traddr=""
# Record the PCI address of Nvme0 so the device can be re-added below.
get_traddr "Nvme0"
delete_nvme "Nvme0n1"
# Give the target a moment to finish the removal before re-attach.
sleep 1
add_nvme "HotInNvme0" "$traddr"
}
function scsi_hotremove_tc2() {
# Test case 2: hot-remove a device while fio runs against it in a VM.
# Expects the in-flight fio to fail (retcode 1), the disk to vanish from
# the VM, and a post-reboot fio on the same job to fail as well.
echo "Scsi hotremove test case 2"
# Attach split bdevs as LUN 0 of the four vhost scsi controllers.
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme0n1p0
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p1.0 0 Nvme1n1p0
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 HotInNvme0n1p1
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p3.1 0 Nvme1n1p1
vms_setup
vm_run_with_arg 0 1
vms_prepare "0 1"
# Snapshot VM 0's disk list before the removal for later comparison.
vm_check_scsi_location "0"
local disks="$SCSI_DISK"
traddr=""
get_traddr "Nvme0"
prepare_fio_cmd_tc1 "0 1"
# Start fio in the background, then yank the device from under it.
$run_fio &
local last_pid=$!
sleep 3
delete_nvme "HotInNvme0n1"
# Expected-failure region: disable -xe so fio's non-zero status and the
# ERR trap do not abort the suite.
set +xe
wait $last_pid
check_fio_retcode "Scsi hotremove test case 2: Iteration 1." 1 $?
# The removed disk must no longer be visible inside the VM.
vm_check_scsi_location "0"
local new_disks="$SCSI_DISK"
check_disks "$disks" "$new_disks"
reboot_all_and_prepare "0 1"
# Job file still references the removed disk, so fio must fail again.
$run_fio
check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $?
set -xe
vm_shutdown_all
# Re-attach the controller under the next name for the following case.
add_nvme "HotInNvme1" "$traddr"
sleep 1
}
function scsi_hotremove_tc3() {
# Test case 3: same as tc2 but fio runs only in VM 0 while both VMs are
# up; removal must fail the running fio and shrink VM 0's disk list.
echo "Scsi hotremove test case 3"
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme1n1p0
vm_run_with_arg 0 1
vms_prepare "0 1"
# Snapshot VM 0's disk list before the removal.
vm_check_scsi_location "0"
local disks="$SCSI_DISK"
traddr=""
get_traddr "Nvme0"
prepare_fio_cmd_tc1 "0"
# Start fio in the background, then remove the device mid-run.
$run_fio &
local last_pid=$!
sleep 3
delete_nvme "HotInNvme1n1"
# Expected-failure region (see tc2).
set +xe
wait $last_pid
check_fio_retcode "Scsi hotremove test case 3: Iteration 1." 1 $?
vm_check_scsi_location "0"
local new_disks="$SCSI_DISK"
check_disks "$disks" "$new_disks"
reboot_all_and_prepare "0 1"
# Stale job file still targets the removed disk — fio must fail.
$run_fio
check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $?
set -xe
vm_shutdown_all
# Re-attach under the next name for tc4.
add_nvme "HotInNvme2" "$traddr"
sleep 1
}
function scsi_hotremove_tc4() {
# Test case 4: run fio in both VMs simultaneously, hot-remove the shared
# device, and verify both fio runs fail and both VMs lose the disk; a
# regenerated job file after reboot must then succeed (retcode 0).
echo "Scsi hotremove test case 4"
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 HotInNvme2n1p0
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 HotInNvme2n1p1
vm_run_with_arg 0 1
vms_prepare "0 1"
# Snapshot VM 0's disks and start its background fio.
vm_check_scsi_location "0"
local disks_vm0="$SCSI_DISK"
prepare_fio_cmd_tc1 "0"
$run_fio &
# Fix: declare last_pid_vm0 local, consistent with last_pid_vm1 below.
local last_pid_vm0=$!
# Snapshot VM 1's disks and start its background fio.
vm_check_scsi_location "1"
local disks_vm1="$SCSI_DISK"
prepare_fio_cmd_tc1 "1"
$run_fio &
local last_pid_vm1=$!
# Pre-build the combined two-VM fio command used after the reboot below.
prepare_fio_cmd_tc1 "0 1"
sleep 3
traddr=""
get_traddr "Nvme0"
delete_nvme "HotInNvme2n1"
# Expected-failure region: both background fio runs should fail.
set +xe
wait $last_pid_vm0
local retcode_vm0=$?
wait $last_pid_vm1
local retcode_vm1=$?
check_fio_retcode "Scsi hotremove test case 4: Iteration 1." 1 $retcode_vm0
vm_check_scsi_location "0"
local new_disks_vm0="$SCSI_DISK"
check_disks "$disks_vm0" "$new_disks_vm0"
check_fio_retcode "Scsi hotremove test case 4: Iteration 2." 1 $retcode_vm1
vm_check_scsi_location "1"
local new_disks_vm1="$SCSI_DISK"
check_disks "$disks_vm1" "$new_disks_vm1"
reboot_all_and_prepare "0 1"
# Stale job file still targets the removed disk — fio must fail.
$run_fio
check_fio_retcode "Scsi hotremove test case 4: Iteration 3." 1 $?
# Regenerate the job against the remaining disks — fio must now pass.
prepare_fio_cmd_tc1 "0 1"
$run_fio
check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $?
set -xe
vm_shutdown_all
add_nvme "HotInNvme3" "$traddr"
sleep 1
# Detach the LUNs that were never hot-removed before teardown.
$rpc_py remove_vhost_scsi_target naa.Nvme0n1p1.0 0
$rpc_py remove_vhost_scsi_target naa.Nvme0n1p3.1 0
}
function pre_scsi_hotremove_test_case() {
	# Suite setup: create the four vhost scsi controllers used by the
	# hot-remove test cases (two per VM, suffix selects the VM).
	local ctrlr
	for ctrlr in naa.Nvme0n1p0.0 naa.Nvme0n1p1.0 naa.Nvme0n1p2.1 naa.Nvme0n1p3.1; do
		$rpc_py construct_vhost_scsi_controller $ctrlr
	done
}
function post_scsi_hotremove_test_case() {
	# Suite teardown: delete the controllers created by
	# pre_scsi_hotremove_test_case, in the same order.
	local ctrlr
	for ctrlr in naa.Nvme0n1p0.0 naa.Nvme0n1p1.0 naa.Nvme0n1p2.1 naa.Nvme0n1p3.1; do
		$rpc_py remove_vhost_controller $ctrlr
	done
}
# Disable the inherited ERR trap for the duration of the suite: the test
# cases themselves toggle 'set +xe' / 'set -xe' around expected fio failures.
trap "" ERR
pre_scsi_hotremove_test_case
scsi_hotremove_tc1
scsi_hotremove_tc2
scsi_hotremove_tc3
scsi_hotremove_tc4
post_scsi_hotremove_test_case
# Restore the caller's ERR trap once the suite is done (this file is sourced).
trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

View File

@ -2,3 +2,6 @@
[Ioat]
Disable Yes
[Nvme]
HotplugEnable Yes

View File

@ -22,6 +22,7 @@ case $1 in
echo " -ilsn|--integrity-lvol-scsi-nightly for running an nightly integrity test with vhost scsi and lvol backends"
echo " -ilbn|--integrity-lvol-blk-nightly for running an nightly integrity test with vhost blk and lvol backends"
echo " -hp|--hotplug for running hotplug tests"
echo " -shr|--scsi-hot-remove for running scsi hot remove tests"
echo " -ro|--readonly for running readonly test for vhost blk"
echo " -h |--help prints this message"
echo ""
@ -160,6 +161,15 @@ case $1 in
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
;;
-shr|--scsi-hot-remove)
echo 'Running scsi hotremove tests suite...'
$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_scsi \
--scsi-hotremove-test \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
;;
-ro|--readonly)
echo 'Running readonly tests suite...'
$WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1