test/vhost: Add tests for hot-attach and hot-detach features.

Test plan for hot-attach and hot-detach included.
File with CPU core masks for vhost and qemu updated because more
than one virtual machine is needed to run the tests.

Change-Id: I6ba02f65398d09e2ef3335c2d5b0d6c04d3e393c
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/372268
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Pawel Kaminski 2017-08-02 19:13:48 +02:00 committed by Daniel Verkamp
parent eb8b1e20a9
commit 598ba73f5f
10 changed files with 759 additions and 15 deletions


@@ -3,3 +3,36 @@ vhost_master_core=0
VM_0_qemu_mask=0x6
VM_0_qemu_numa_node=0
VM_1_qemu_mask=0x18
VM_1_qemu_numa_node=0
VM_2_qemu_mask=0x60
VM_2_qemu_numa_node=0
VM_3_qemu_mask=0x180
VM_3_qemu_numa_node=0
VM_4_qemu_mask=0x600
VM_4_qemu_numa_node=0
VM_5_qemu_mask=0x1800
VM_5_qemu_numa_node=0
VM_6_qemu_mask=0x1800000
VM_6_qemu_numa_node=1
VM_7_qemu_mask=0x6000000
VM_7_qemu_numa_node=1
VM_8_qemu_mask=0x18000000
VM_8_qemu_numa_node=1
VM_9_qemu_mask=0x60000000
VM_9_qemu_numa_node=1
VM_10_qemu_mask=0x180000000
VM_10_qemu_numa_node=1
VM_11_qemu_mask=0x600000000
VM_11_qemu_numa_node=1


@@ -249,27 +249,33 @@ function vm_fio_socket()
cat $vm_dir/fio_socket
}
function vm_create_ssh_config()
{
local ssh_config="$VM_BASE_DIR/ssh_config"
if [[ ! -f $ssh_config ]]; then
(
echo "Host *"
echo " ControlPersist=10m"
echo " ConnectTimeout=2"
echo " Compression=no"
echo " ControlMaster=auto"
echo " UserKnownHostsFile=/dev/null"
echo " StrictHostKeyChecking=no"
echo " User root"
echo " ControlPath=$VM_BASE_DIR/%r@%h:%p.ssh"
echo ""
) > $ssh_config
fi
}
# Execute ssh command on given VM
# param $1 virtual machine number
#
function vm_ssh()
{
vm_num_is_valid $1 || return 1
vm_create_ssh_config
local ssh_config="$VM_BASE_DIR/ssh_config"
if [[ ! -f $ssh_config ]]; then
(
echo "Host *"
echo " ControlPersist=10m"
echo " ConnectTimeout=2"
echo " Compression=no"
echo " ControlMaster=auto"
echo " UserKnownHostsFile=/dev/null"
echo " StrictHostKeyChecking=no"
echo " User root"
echo " ControlPath=$VM_BASE_DIR/%r@%h:%p.ssh"
echo ""
) > $ssh_config
fi
local ssh_cmd="ssh -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
-p $(vm_ssh_socket $1) 127.0.0.1"
@@ -278,6 +284,23 @@ function vm_ssh()
$ssh_cmd "$@"
}
# Execute scp command on given VM
# param $1 virtual machine number
#
function vm_scp()
{
vm_num_is_valid $1 || return 1
vm_create_ssh_config
local ssh_config="$VM_BASE_DIR/ssh_config"
local scp_cmd="scp -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
-P $(vm_ssh_socket $1) "
shift
$scp_cmd "$@"
}
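# Usage sketch (illustrative paths): the hot-plug scripts use vm_scp to upload a
# generated fio job file into a VM before starting fio remotely, e.g.:
#   vm_scp 0 /tmp/default_integrity.job 127.0.0.1:/root/default_integrity.job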
# check if specified VM is running
# param $1 VM num
function vm_is_running()


@@ -0,0 +1,174 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
dry_run=false
no_shutdown=false
fio_bin="fio"
fio_jobs="$BASE_DIR/fio_jobs/"
test_type=spdk_vhost_scsi
reuse_vms=false
force_build=false
vms=()
used_vms=""
disk_split=""
x=""
function usage() {
[[ ! -z $2 ]] && ( echo "$2"; echo ""; )
echo "Shortcut script for doing automated hotattach/hotdetach test"
echo "Usage: $(basename $1) [OPTIONS]"
echo
echo "-h, --help print help and exit"
echo " --test-type=TYPE Perform specified test:"
echo " virtio - test host virtio-scsi-pci using file as disk image"
echo " kernel_vhost - use kernel driver vhost-scsi"
echo " spdk_vhost_scsi - use spdk vhost scsi"
echo " spdk_vhost_blk - use spdk vhost block"
echo "-x set -x for script debug"
echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
echo " --fio-jobs= Fio configs to use for tests. Can point to a directory or"
echo " --work-dir=WORK_DIR Where to find build file. Must exist. [default: $TEST_DIR]"
echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
echo " NUM - VM number (mandatory)"
echo " OS - VM os disk path (optional)"
echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
echo " If test-type=spdk_vhost_blk then each disk can have additional size parameter, e.g."
echo " --vm=X,os.qcow,DISK_size_35G; unit can be M or G; default - 20G"
exit 0
}
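# Example invocation (sketch; the image path is illustrative), matching the form
# used by run_test.sh later in this change:
#   ./scsi_hotplug.sh --test-type=spdk_vhost_scsi \
#       --vm=0,/path/to/vm_image.qcow2,Nvme0n1p0:Nvme0n1p1 \
#       --fio-bin=/usr/bin/fio -x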
while getopts 'xh-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage $0 ;;
work-dir=*) TEST_DIR="${OPTARG#*=}" ;;
fio-bin=*) fio_bin="${OPTARG#*=}" ;;
fio-jobs=*) fio_jobs="${OPTARG#*=}" ;;
test-type=*) test_type="${OPTARG#*=}" ;;
vm=*) vms+=("${OPTARG#*=}") ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
h) usage $0 ;;
x) set -x
x="-x" ;;
*) usage $0 "Invalid argument '$OPTARG'"
esac
done
shift $(( OPTIND - 1 ))
fio_job=$BASE_DIR/fio_jobs/default_integrity.job
tmp_attach_job=$BASE_DIR/fio_jobs/fio_attach.job.tmp
tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp
. $BASE_DIR/../common/common.sh
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py -s 127.0.0.1 "
function print_test_fio_header() {
echo "==============="
echo ""
echo "INFO: Testing..."
echo "INFO: Running fio jobs ..."
if [ $# -gt 0 ]; then
echo $1
fi
}
function run_vhost() {
echo "==============="
echo ""
echo "INFO: running SPDK"
echo ""
$BASE_DIR/../common/run_vhost.sh $x --work-dir=$TEST_DIR --conf-dir=$BASE_DIR
echo
}
function vms_setup() {
for vm_conf in ${vms[@]}; do
IFS=',' read -ra conf <<< "$vm_conf"
setup_cmd="$BASE_DIR/../common/vm_setup.sh $x --work-dir=$TEST_DIR --test-type=$test_type"
if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
echo "ERROR: invalid VM configuration syntax $vm_conf"
exit 1;
fi
# Sanity check if VM is not defined twice
for vm_num in $used_vms; do
if [[ $vm_num -eq ${conf[0]} ]]; then
echo "ERROR: VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
exit 1
fi
done
setup_cmd+=" -f ${conf[0]}"
used_vms+=" ${conf[0]}"
[[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
[[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disk=${conf[2]}"
$setup_cmd
done
}
function vms_setup_and_run() {
vms_setup
# Run everything
$BASE_DIR/../common/vm_run.sh $x --work-dir=$TEST_DIR $used_vms
vm_wait_for_boot 600 $used_vms
}
function vms_prepare() {
for vm_num in $1; do
vm_dir=$VM_BASE_DIR/$vm_num
qemu_mask_param="VM_${vm_num}_qemu_mask"
host_name="VM-${vm_num}-${!qemu_mask_param}"
echo "INFO: Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
done
}
function vms_reboot_all() {
echo "Rebooting all vms "
for vm_num in $1; do
vm_ssh $vm_num "reboot" || true
done
vm_wait_for_boot 600 $1
}
function check_fio_retcode() {
fio_retcode=$3
echo $1
retcode_expected=$2
if [ $retcode_expected == 0 ]; then
if [ $fio_retcode != 0 ]; then
echo " Fio test ended with error."
vm_shutdown_all
spdk_vhost_kill
exit 1
else
echo " Fio test ended with success."
fi
else
if [ $fio_retcode != 0 ]; then
echo " Fio test ended with expected error."
else
echo " Fio test ended with unexpected success."
vm_shutdown_all
spdk_vhost_kill
exit 1
fi
fi
}
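# Usage sketch: callers wait for a background fio job and pass a message, the
# expected return code, and the actual return code, e.g.:
#   wait $fio_pid
#   check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?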
function reboot_all_and_prepare() {
vms_reboot_all $1
vms_prepare $1
}


@@ -0,0 +1,15 @@
[global]
blocksize=4k
iodepth=512
iodepth_batch=128
iodepth_low=256
ioengine=libaio
group_reporting
thread
numjobs=1
direct=1
rw=randwrite
do_verify=1
verify=md5
verify_backlog=1024
runtime=10
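# Note: the hot-plug scripts append one job section per disk found in the VM
# before uploading this file, e.g. (device name illustrative):
#   [nvme-hostsdb]
#   filename=/dev/sdb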


@@ -0,0 +1,95 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
. $BASE_DIR/common.sh
function prepare_fio_cmd_tc1() {
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
cp $fio_job $tmp_attach_job
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
for disk in $SCSI_DISK; do
echo "[nvme-host$disk]" >> $tmp_attach_job
echo "filename=/dev/$disk" >> $tmp_attach_job
done
vm_scp $vm_num $tmp_attach_job 127.0.0.1:/root/default_integrity_discs.job
run_fio+="--client=127.0.0.1,$(vm_fio_socket ${vm_num}) --remote-config /root/default_integrity_discs.job "
rm $tmp_attach_job
done
}
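# Example of the resulting fio client command (sketch; the socket number is
# illustrative, it is read from the VM's fio_socket file):
#   fio --eta=never --client=127.0.0.1,10104 --remote-config /root/default_integrity_discs.job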
# Check if fio test passes on device attached to first controller.
function hotattach_tc1() {
echo "Hotattach test case 1"
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 Nvme0n1p0
sleep 3
prepare_fio_cmd_tc1 "0"
$run_fio
check_fio_retcode "Hotattach test case 1: Iteration 1." 0 $?
}
# Run fio test for previously attached device.
# During test attach another device to first controller and check fio status.
function hotattach_tc2() {
echo "Hotattach test case 2"
prepare_fio_cmd_tc1 "0"
$run_fio &
last_pid=$!
sleep 3
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 1 Nvme0n1p1
wait $last_pid
check_fio_retcode "Hotattach test case 2: Iteration 1." 0 $?
}
# Run fio test for previously attached devices.
# During test attach another device to second controller and check fio status.
function hotattach_tc3() {
echo "Hotattach test case 3"
prepare_fio_cmd_tc1 "0"
$run_fio &
last_pid=$!
sleep 3
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p1.0 0 Nvme0n1p2
wait $last_pid
check_fio_retcode "Hotattach test case 3: Iteration 1." 0 $?
}
# Run fio test for previously attached devices.
# During test attach another device to third controller(VM2) and check fio status.
# At the end after rebooting VMs run fio test for all devices and check fio status.
function hotattach_tc4() {
echo "Hotattach test case 4"
prepare_fio_cmd_tc1 "0"
$run_fio &
last_pid=$!
sleep 3
$rpc_py add_vhost_scsi_lun naa.Nvme0n1p2.1 0 Nvme0n1p3
wait $last_pid
check_fio_retcode "Hotattach test case 4: Iteration 1." 0 $?
prepare_fio_cmd_tc1 "0 1"
$run_fio
check_fio_retcode "Hotattach test case 4: Iteration 2." 0 $?
reboot_all_and_prepare "0 1"
prepare_fio_cmd_tc1 "0 1"
$run_fio
check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
}
hotattach_tc1
hotattach_tc2
hotattach_tc3
hotattach_tc4


@@ -0,0 +1,237 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
. $BASE_DIR/common.sh
function get_first_disk() {
vm_check_scsi_location $1
disk_array=( $SCSI_DISK )
eval "$2=${disk_array[0]}"
}
function check_disks() {
if [ "$1" == "$2" ]; then
echo "Disk has not been deleted"
exit 1
fi
}
function prepare_fio_cmd_tc1_iter1() {
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
cp $fio_job $tmp_detach_job
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
for disk in $SCSI_DISK; do
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
done
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_4discs.job
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
rm $tmp_detach_job
done
}
function prepare_fio_cmd_tc1_iter2() {
print_test_fio_header
for vm_num in 2; do
cp $fio_job $tmp_detach_job
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
for disk in $SCSI_DISK; do
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
done
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity_3discs.job
rm $tmp_detach_job
done
run_fio="$fio_bin --eta=never "
for vm_num in $used_vms; do
if [ $vm_num == 2 ]; then
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_3discs.job "
continue
fi
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity_4discs.job "
done
}
function prepare_fio_cmd_tc2_iter1() {
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
cp $fio_job $tmp_detach_job
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
disk_array=($SCSI_DISK)
disk=${disk_array[0]}
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/default_integrity.job
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/default_integrity.job "
rm $tmp_detach_job
done
}
function prepare_fio_cmd_tc2_iter2() {
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
cp $fio_job $tmp_detach_job
if [ $vm_num == 2 ]; then
vm_job_name=default_integrity_3discs.job
else
vm_job_name=default_integrity_4discs.job
fi
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
for disk in $SCSI_DISK; do
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
done
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/${vm_job_name} "
rm $tmp_detach_job
done
}
function prepare_fio_cmd_tc3_iter1() {
print_test_fio_header
run_fio="$fio_bin --eta=never "
for vm_num in $1; do
cp $fio_job $tmp_detach_job
if [ $vm_num == 2 ]; then
vm_job_name=default_integrity_3discs.job
else
vm_job_name=default_integrity_4discs.job
fi
vm_dir=$VM_BASE_DIR/$vm_num
vm_check_scsi_location $vm_num
j=1
for disk in $SCSI_DISK; do
if [ $vm_num == 2 ]; then
if [ $j == 1 ]; then
(( j++ ))
continue
fi
fi
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
(( j++ ))
done
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
rm $tmp_detach_job
done
}
# During fio test for all devices remove first device from fifth controller and check if fio fails.
# Also check if disk has been removed from the VM.
function hotdetach_tc1() {
echo "Hotdetach test case 1"
first_disk=""
get_first_disk "2" first_disk
prepare_fio_cmd_tc1_iter1 "2 3"
$run_fio &
last_pid=$!
sleep 3
$rpc_py remove_vhost_scsi_dev naa.Nvme0n1p4.2 0
set +xe
wait $last_pid
check_fio_retcode "Hotdetach test case 1: Iteration 1." 1 $?
set -xe
second_disk=""
get_first_disk "2" second_disk
check_disks $first_disk $second_disk
}
# During fio test for device from third VM remove first device from fifth controller and check if fio fails.
# Also check if disk has been removed from the VM.
function hotdetach_tc2() {
echo "Hotdetach test case 2"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""
get_first_disk "2" first_disk
prepare_fio_cmd_tc2_iter1 "2"
$run_fio &
last_pid=$!
sleep 3
$rpc_py remove_vhost_scsi_dev naa.Nvme0n1p4.2 0
set +xe
wait $last_pid
check_fio_retcode "Hotdetach test case 2: Iteration 1." 1 $?
set -xe
second_disk=""
get_first_disk "2" second_disk
check_disks $first_disk $second_disk
}
# Run fio test for all devices except one, then remove this device and check if fio passes.
# Also check if disk has been removed from the VM.
function hotdetach_tc3() {
echo "Hotdetach test case 3"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""
get_first_disk "2" first_disk
prepare_fio_cmd_tc3_iter1 "2 3"
$run_fio &
last_pid=$!
sleep 3
$rpc_py remove_vhost_scsi_dev naa.Nvme0n1p4.2 0
wait $last_pid
check_fio_retcode "Hotdetach test case 3: Iteration 1." 0 $?
second_disk=""
get_first_disk "2" second_disk
check_disks $first_disk $second_disk
}
# Run fio test for all devices except one and run separate fio test for this device.
# Check if the first fio test passes and the second fio test fails.
# Also check if disk has been removed from the VM.
# After reboot run fio test for the remaining devices and check if fio passes.
function hotdetach_tc4() {
echo "Hotdetach test case 4"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""
get_first_disk "2" first_disk
prepare_fio_cmd_tc2_iter1 "2"
$run_fio &
first_fio_pid=$!
prepare_fio_cmd_tc3_iter1 "2 3"
$run_fio &
second_fio_pid=$!
sleep 3
$rpc_py remove_vhost_scsi_dev naa.Nvme0n1p4.2 0
set +xe
wait $first_fio_pid
check_fio_retcode "Hotdetach test case 4: Iteration 1." 1 $?
set -xe
wait $second_fio_pid
check_fio_retcode "Hotdetach test case 4: Iteration 2." 0 $?
second_disk=""
get_first_disk "2" second_disk
check_disks $first_disk $second_disk
reboot_all_and_prepare "2 3"
sleep 2
prepare_fio_cmd_tc2_iter2 "2 3"
$run_fio
check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
}
hotdetach_tc1
hotdetach_tc2
hotdetach_tc3
hotdetach_tc4


@@ -0,0 +1,63 @@
#!/usr/bin/env bash
set -e
BASE_DIR=$(readlink -f $(dirname $0))
[[ -z "$TEST_DIR" ]] && TEST_DIR="$(cd $BASE_DIR/../../../../ && pwd)"
. $BASE_DIR/common.sh
# Add split section into vhost config
function gen_config() {
cp $BASE_DIR/vhost.conf.base $BASE_DIR/vhost.conf.in
cat << END_OF_CONFIG >> $BASE_DIR/vhost.conf.in
[Split]
Split Nvme0n1 16
END_OF_CONFIG
}
# Run SPDK vhost by calling run_vhost from hotplug/common.sh.
# run_vhost uses the run_vhost.sh script (test/vhost/common),
# which calls spdk_vhost_run (common/common.sh) to start vhost.
# Then prepare vhost with RPC calls, and set up and run 4 VMs.
function pre_test_case() {
used_vms=""
run_vhost
rm $BASE_DIR/vhost.conf.in
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p0.0
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p1.0
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p2.1
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p3.1
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p4.2
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p5.2
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p6.3
$SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p7.3
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 1 Nvme0n1p9
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p5.2 0 Nvme0n1p10
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p5.2 1 Nvme0n1p11
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p6.3 0 Nvme0n1p12
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p6.3 1 Nvme0n1p13
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p7.3 0 Nvme0n1p14
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p7.3 1 Nvme0n1p15
vms_setup_and_run "0 1 2 3"
vms_prepare "0 1 2 3"
}
function reboot_all_and_prepare() {
vms_reboot_all $1
vms_prepare $1
}
function post_test_case() {
vm_shutdown_all
spdk_vhost_kill
}
gen_config
pre_test_case
$BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
first_script=$!
$BASE_DIR/scsi_hotdetach.sh --fio-bin=$fio_bin &
second_script=$!
wait $first_script
wait $second_script
post_test_case


@@ -0,0 +1,86 @@
# Vhost hot-attach and hot-detach test plan
## Objective
The purpose of these tests is to verify that SPDK vhost remains stable during
hot-attach and hot-detach operations performed on SCSI controller devices.
Hot-attach is a scenario where a device is added to a controller already in use by
a guest VM, while in hot-detach a device is removed from a controller while it is in use.
## Test Cases Description
1. FIO I/O traffic is run during the hot-attach and hot-detach operations.
By default FIO uses the default_integrity*.job config files located in
the test/vhost/hotplug/fio_jobs directory.
2. The FIO mode of operation is random write (randwrite) with verification enabled,
which means read operations are also performed.
3. Test case descriptions below contain manual steps for testing.
Automated tests are located in test/vhost/hotplug.
### Hotattach, Hotdetach Test Cases prerequisites
1. Run vhost with 8 empty controllers. Prepare 16 NVMe disks.
If you do not have 16 physical disks, use the Split feature to create 16 split bdevs
(see the sketch below).
2. In each test case the fio exit status is checked after every run to detect errors.
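A minimal sketch of this setup, matching what scsi_hotplug.sh in this patch does
(bdev and controller names follow that script):

    # appended to the vhost config to split one NVMe bdev into 16 partitions
    [Split]
      Split Nvme0n1 16

    # create one of the 8 empty vhost-scsi controllers over RPC
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p0.0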
### Hotattach Test Cases prerequisites
1. Run two VMs: the first with ctrlr-1 and ctrlr-2, the second with ctrlr-3 and ctrlr-4.
## Test Case 1
1. Attach NVMe to Ctrlr 1 (see the example RPC call after this list)
2. Run fio integrity on the attached device
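The attach step above corresponds to a single RPC call; a sketch using the controller
and bdev names from scsi_hotplug.sh:

    # attach split bdev Nvme0n1p0 as LUN 0 of the first controller
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 Nvme0n1p0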
## Test Case 2
1. Run fio integrity on attached device from test case 1
2. During fio attach another NVMe to Ctrlr 1
3. Run fio integrity on both devices
## Test Case 3
1. Run fio integrity on attached devices from previous test cases
2. During fio attach NVMe to Ctrl2
3. Run fio integrity on all devices
## Test Case 4
1. Run fio integrity on attached device from previous test cases
2. During fio attach NVMe to Ctrl3/VM2
3. Run fio integrity on all devices
4. Reboot VMs
5. Run fio integrity again on all devices
### Hotdetach Test Cases prerequisites
1. Run two VMs: the first with ctrlr-5 and ctrlr-6, the second with ctrlr-7 and ctrlr-8.
## Test Case 1
1. Run fio on all devices
2. Detach NVMe from Ctrl5 during fio (see the example RPC call after this list)
3. Check that neither vhost nor the VMs crashed
4. Check that the detached device is gone from the VM
5. Check that the fio job run on the detached device stopped and failed
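The detach step above corresponds to the following RPC call; a sketch using the
controller name from scsi_hotplug.sh:

    # remove the SCSI device at index 0 from the fifth controller while fio is still running
    $SPDK_BUILD_DIR/scripts/rpc.py remove_vhost_scsi_dev naa.Nvme0n1p4.2 0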
## Test Case 2
1. Attach NVMe to Ctrlr 5
2. Run fio on 1 device from Ctrl 5
3. Detach NVMe from Ctrl5 during fio traffic
4. Check that neither vhost nor the VMs crashed
5. Check that the fio job run on the detached device stopped and failed
6. Check that the detached device is gone from the VM
## Test Case 3
1. Attach NVMe to Ctrlr 5
2. Run fio with integrity on all devices, except one
3. Detach the NVMe device that has no traffic while fio is running on the other devices
4. Check that neither vhost nor the VMs crashed
5. Check that the fio jobs did not fail
6. Check that the detached device is gone from the VM
## Test Case 4
1. Attach NVMe to Ctrlr 5
2. Run fio on 1 device from Ctrl 5
3. Run separate fio with integrity on all other devices (all VMs)
4. Detach NVMe from Ctrl5 during fio traffic
5. Check that neither vhost nor the VMs crashed
6. Check that the fio job run on the detached device stopped and failed
7. Check that the other fio jobs did not fail
8. Check that the detached device is gone from the VM
9. Reboot VMs
10. Check that the detached device is gone from the VM
11. Check that all other devices are in place
12. Run fio integrity on all remaining devices


@@ -0,0 +1,7 @@
[Global]
[Rpc]
Enable Yes
[Ioat]
Disable Yes
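# Note: scsi_hotplug.sh copies this file to vhost.conf.in and appends a [Split]
# section before starting vhost, e.g.:
#   [Split]
#     Split Nvme0n1 16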


@@ -61,11 +61,21 @@ case $param in
echo Running lvol integrity suite...
./lvol/lvol_test.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--ctrl-type=vhost_scsi
;;
;;
-ilb|--integrity-lvol-blk)
echo Running lvol integrity suite...
./lvol/lvol_test.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--ctrl-type=vhost_blk
;;
-hp|--hotplug)
echo Running hotplug tests suite...
./hotplug/scsi_hotplug.sh --fio-bin=/home/sys_sgsw/fio_ubuntu \
--vm=0,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p0:Nvme0n1p1 \
--vm=1,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p2:Nvme0n1p3 \
--vm=2,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p4:Nvme0n1p5 \
--vm=3,/home/sys_sgsw/vhost_vm_image.qcow2,Nvme0n1p6:Nvme0n1p7 \
--test-type=spdk_vhost_scsi \
--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
;;
-h|--help)
echo "-i |--integrity for running an integrity test with vhost scsi"
@@ -76,6 +86,7 @@ case $param in
echo "-pb|--performance-blk for running a performance test with vhost blk"
echo "-ils|--integrity-lvol-scsi for running an integrity test with vhost scsi and lvol backends"
echo "-ilb|--integrity-lvol-blk for running an integrity test with vhost blk and lvol backends"
echo "-hp|--hotplug for running hotplug tests"
echo "-h |--help prints this message"
;;
*)