test/vhost: introduce notice, warning, error and fail message helpers

Add message helpers so we can better trace what failed:
  notice  - just echo to stdout
  warning - just echo to stderr
  error   - echo to stderr and return false, so trap ERR can catch it
  fail    - like error, but call 'exit 1' at the end, so it exits even if no ERR trap is installed

Change-Id: I5c7b3682fd6c0d81c07c58a5ec965155c7593407
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-on: https://review.gerrithub.io/392218
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
This commit is contained in:
parent df6e317ed9
commit 575a291fa3
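For orientation, here is a stand-alone sketch (not part of the patch) that restates the four helpers in simplified form to illustrate the semantics described in the commit message; the real versions in the diff below additionally annotate each message with the calling function or file and line number when SPDK_VHOST_VERBOSE is true.

    #!/usr/bin/env bash
    # Simplified illustration only - the actual helpers live in the vhost test common code.
    notice()  { echo "INFO: $*"; }                # plain echo to stdout
    warning() { echo "WARN: $*" >&2; }            # echo to stderr, execution continues
    error()   { echo "ERROR: $*" >&2; false; }    # echo to stderr and return non-zero so trap ERR can catch it
    fail()    { echo "FAIL: $*" >&2; exit 1; }    # like error, but exits 1 even without an ERR trap

    trap 'echo "ERR trap: aborting" >&2; exit 1' ERR
    set -e

    notice "informational message"
    warning "non-fatal problem, continuing"
    error "fatal problem"   # returns false, so the ERR trap fires here under set -e
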
@@ -1,5 +1,7 @@
set -e

: ${SPDK_VHOST_VERBOSE=false}

BASE_DIR=$(readlink -f $(dirname $0))

# Default running dir -> spdk/..
@@ -10,10 +12,53 @@ SPDK_BUILD_DIR=$BASE_DIR/../../../

SPDK_VHOST_SCSI_TEST_DIR=$TEST_DIR/vhost

function message()
{
if ! $SPDK_VHOST_VERBOSE; then
local verbose_out=""
elif [[ ${FUNCNAME[2]} == "source" ]]; then
local verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
else
local verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
fi

local msg_type="$1"
shift
echo -e "${msg_type}${verbose_out}: $@"
}

function fail()
{
echo "===========" >&2
message "FAIL" "$@" >&2
echo "===========" >&2
exit 1
}

function error()
{
echo "===========" >&2
message "ERROR" "$@" >&2
echo "===========" >&2
# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
false
}

function warning()
{
message "WARN" "$@" >&2
}

function notice()
{
message "INFO" "$@"
}


# SSH key file
: ${SPDK_VHOST_SSH_KEY_FILE="$HOME/.ssh/spdk_vhost_id_rsa"}
if [[ ! -e "$SPDK_VHOST_SSH_KEY_FILE" ]]; then
echo "Could not find SSH key file $SPDK_VHOST_SSH_KEY_FILE"
error "Could not find SSH key file $SPDK_VHOST_SSH_KEY_FILE"
exit 1
fi
echo "Using SSH key file $SPDK_VHOST_SSH_KEY_FILE"
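A note on the mechanics in the hunk above: message() inspects FUNCNAME[2] and BASH_LINENO[1], i.e. the caller of notice/warning/error, so with SPDK_VHOST_VERBOSE=true a call such as notice "starting vhost app in background" made from spdk_vhost_run() is expected to print something along the lines of (line number purely illustrative):

    INFO (function spdk_vhost_run:96): starting vhost app in background

while with verbosity off it prints just "INFO: starting vhost app in background". error() deliberately ends with a bare false instead of 'return 1' - as the in-code comment notes, returning directly would leave the upper frame out of the stack trace caught by the ERR trap.
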
@@ -39,15 +84,6 @@ else
set +x
fi

function error()
{
echo "==========="
echo -e "ERROR: $@"
echo "==========="
# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
false
}

function spdk_vhost_run()
{
local vhost_conf_path="$1"
@@ -57,7 +93,7 @@ function spdk_vhost_run()
local vhost_socket="$SPDK_VHOST_SCSI_TEST_DIR/usvhost"
local vhost_conf_template="$vhost_conf_path/vhost.conf.in"
local vhost_conf_file="$vhost_conf_path/vhost.conf"
echo "INFO: starting vhost app in background"
notice "starting vhost app in background"
[[ -r "$vhost_pid_file" ]] && spdk_vhost_kill
[[ -d $SPDK_VHOST_SCSI_TEST_DIR ]] && rm -f $SPDK_VHOST_SCSI_TEST_DIR/*
mkdir -p $SPDK_VHOST_SCSI_TEST_DIR
@@ -77,18 +113,18 @@ function spdk_vhost_run()

local cmd="$vhost_app -m $vhost_reactor_mask -p $vhost_master_core -c $vhost_conf_file"

echo "INFO: Loging to: $vhost_log_file"
echo "INFO: Config file: $vhost_conf_file"
echo "INFO: Socket: $vhost_socket"
echo "INFO: Command: $cmd"
notice "Loging to: $vhost_log_file"
notice "Config file: $vhost_conf_file"
notice "Socket: $vhost_socket"
notice "Command: $cmd"

cd $SPDK_VHOST_SCSI_TEST_DIR; $cmd &
vhost_pid=$!
echo $vhost_pid > $vhost_pid_file

echo "INFO: waiting for app to run..."
notice "waiting for app to run..."
waitforlisten "$vhost_pid"
echo "INFO: vhost started - pid=$vhost_pid"
notice "vhost started - pid=$vhost_pid"

rm $vhost_conf_file
}
@@ -98,15 +134,15 @@ function spdk_vhost_kill()
local vhost_pid_file="$SPDK_VHOST_SCSI_TEST_DIR/vhost.pid"

if [[ ! -r $vhost_pid_file ]]; then
echo "WARN: no vhost pid file found"
warning "no vhost pid file found"
return 0
fi

local vhost_pid="$(cat $vhost_pid_file)"
echo "INFO: killing vhost (PID $vhost_pid) app"
notice "killing vhost (PID $vhost_pid) app"

if /bin/kill -INT $vhost_pid >/dev/null; then
echo "INFO: sent SIGINT to vhost app - waiting 60 seconds to exit"
notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
for ((i=0; i<60; i++)); do
if /bin/kill -0 $vhost_pid; then
echo "."
@@ -116,7 +152,7 @@ function spdk_vhost_kill()
fi
done
if /bin/kill -0 $vhost_pid; then
echo "ERROR: vhost was NOT killed - sending SIGABRT"
error "ERROR: vhost was NOT killed - sending SIGABRT"
/bin/kill -ABRT $vhost_pid
rm $vhost_pid_file
return 1
@@ -125,7 +161,7 @@ function spdk_vhost_kill()
error "vhost NOT killed - you need to kill it manually"
return 1
else
echo "INFO: vhost was no running"
notice "vhost was no running"
fi

rm $vhost_pid_file
@@ -139,7 +175,7 @@ function assert_number()
{
[[ "$1" =~ [0-9]+ ]] && return 0

echo "${FUNCNAME[1]}() - ${BASH_LINENO[1]}: ERROR Invalid or missing paramter: need number but got '$1'" > /dev/stderr
error "Invalid or missing paramter: need number but got '$1'"
return 1;
}

@@ -150,7 +186,7 @@ function vm_num_is_valid()
{
[[ "$1" =~ ^[0-9]+$ ]] && return 0

echo "${FUNCNAME[1]}() - ${BASH_LINENO[1]}: ERROR Invalid or missing paramter: vm number '$1'" > /dev/stderr
error "Invalid or missing paramter: vm number '$1'"
return 1;
}

@@ -243,7 +279,7 @@ function vm_is_running()
return 0
else
if [[ $EUID -ne 0 ]]; then
echo "WARNING: not root - assuming we running since can't be checked"
warning "not root - assuming VM running since can't be checked"
return 0
fi

@@ -286,16 +322,16 @@ function vm_shutdown()
fi

if ! vm_is_running $1; then
echo "INFO: VM$1 ($vm_dir) is not running"
notice "VM$1 ($vm_dir) is not running"
return 0
fi

# Temporarily disabling exit flag for next ssh command, since it will
# "fail" due to shutdown
echo "Shutting down virtual machine $vm_dir"
notice "Shutting down virtual machine $vm_dir"
set +e
vm_ssh $1 "nohup sh -c 'shutdown -h -P now'" || true
echo "INFO: VM$1 is shutting down - wait a while to complete"
notice "VM$1 is shutting down - wait a while to complete"
set -e
}

@@ -308,16 +344,15 @@ function vm_kill()
local vm_dir="$VM_BASE_DIR/$1"

if [[ ! -r $vm_dir/qemu.pid ]]; then
#echo "WARN: VM$1 pid not found - not killing"
return 0
fi

local vm_pid="$(cat $vm_dir/qemu.pid)"

echo "Killing virtual machine $vm_dir (pid=$vm_pid)"
notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
# First kill should fail, second one must fail
if /bin/kill $vm_pid; then
echo "INFO: process $vm_pid killed"
notice "process $vm_pid killed"
rm $vm_dir/qemu.pid
elif vm_is_running $1; then
error "Process $vm_pid NOT killed"
@@ -345,7 +380,7 @@ function vm_shutdown_all()
vm_shutdown $(basename $vm)
done

echo "INFO: Waiting for VMs to shutdown..."
notice "Waiting for VMs to shutdown..."
timeo=10
while [[ $timeo -gt 0 ]]; do
all_vms_down=1
@@ -357,7 +392,7 @@ function vm_shutdown_all()
done

if [[ $all_vms_down == 1 ]]; then
echo "INFO: All VMs successfully shut down"
notice "All VMs successfully shut down"
shopt -u nullglob
return 0
fi
@@ -408,7 +443,8 @@ function vm_setup()

vm_num_is_valid $vm_num || return 1
local vm_dir="$VM_BASE_DIR/$vm_num"
[[ -d $vm_dir ]] && echo "WARNING: removing existing VM in '$vm_dir'"
[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
# FIXME: why this is just echo???
echo "rm -rf $vm_dir"
else
local vm_dir=""
@@ -425,7 +461,7 @@ function vm_setup()
return 1
fi

echo "INFO: Creating new VM in $vm_dir"
notice "Creating new VM in $vm_dir"
mkdir -p $vm_dir
if [[ ! -r $os ]]; then
error "file not found: $os"
@@ -446,7 +482,7 @@ function vm_setup()

local task_mask=${!qemu_mask_param}

echo "INFO: TASK MASK: $task_mask"
notice "TASK MASK: $task_mask"
local cmd="taskset -a $task_mask $INSTALL_DIR/bin/qemu-system-x86_64 ${eol}"
local vm_socket_offset=$(( 10000 + 100 * vm_num ))

@@ -466,7 +502,7 @@ function vm_setup()

#-cpu host
local node_num=${!qemu_numa_node_param}
echo "INFO: NUMA NODE: $node_num"
notice "NUMA NODE: $node_num"
cmd+="-m 1024 --enable-kvm -smp $cpu_num -vga std -vnc :$vnc_socket -daemonize -snapshot ${eol}"
cmd+="-object memory-backend-file,id=mem,size=1G,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind ${eol}"
cmd+="-numa node,memdev=mem ${eol}"
@@ -506,10 +542,10 @@ function vm_setup()
return 1
fi

echo "INFO: Creating Virtio disc $raw_disk"
notice "Creating Virtio disc $raw_disk"
dd if=/dev/zero of=$raw_disk bs=1024k count=10240
else
echo "INFO: Using existing image $raw_disk"
notice "Using existing image $raw_disk"
fi

cmd+="-device virtio-scsi-pci ${eol}"
@@ -517,7 +553,7 @@ function vm_setup()
cmd+="-drive if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache ${eol}"
;;
spdk_vhost_scsi)
echo "INFO: using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
notice "using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
cmd+="-chardev socket,id=char_$disk,path=$SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num ${eol}"
cmd+="-device vhost-user-scsi-pci,id=scsi_$disk,num_queues=$cpu_num,chardev=char_$disk ${eol}"
;;
@@ -528,7 +564,7 @@ function vm_setup()
size="20G"
fi
disk=${disk%%_*}
echo "INFO: using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
notice "using socket $SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num"
cmd+="-chardev socket,id=char_$disk,path=$SPDK_VHOST_SCSI_TEST_DIR/naa.$disk.$vm_num ${eol}"
cmd+="-device vhost-user-blk-pci,num_queues=$cpu_num,chardev=char_$disk,"
cmd+="logical_block_size=4096,size=$size ${eol}"
@@ -541,7 +577,7 @@ function vm_setup()
error "$disk_type - disk(wnn)=$disk does not look like WNN number"
return 1
fi
echo "Using kernel vhost disk wwn=$disk"
notice "Using kernel vhost disk wwn=$disk"
cmd+=" -device vhost-scsi-pci,wwpn=$disk ${eol}"
;;
*)
@@ -554,7 +590,7 @@ function vm_setup()
# remove last $eol
cmd="${cmd%\\\\\\n }"

echo "Saving to $vm_dir/run.sh:"
notice "Saving to $vm_dir/run.sh:"
(
echo '#!/bin/bash'
echo 'if [[ $EUID -ne 0 ]]; then '
@@ -598,7 +634,7 @@ function vm_run()
case "$optchar" in
a) run_all=true ;;
*)
echo "vm_run Unknown param $OPTARG"
error "Unknown param $OPTARG"
return 1
;;
esac
@@ -623,11 +659,11 @@ function vm_run()

for vm in $vms_to_run; do
if vm_is_running $(basename $vm); then
echo "WARNING: VM$(basename $vm) ($vm) already running"
warning "VM$(basename $vm) ($vm) already running"
continue
fi

echo "INFO: running $vm/run.sh"
notice "running $vm/run.sh"
if ! $vm/run.sh; then
error "FAILED to run vm $vm"
return 1
@@ -646,7 +682,7 @@ function vm_wait_for_boot()
[[ $timeout_time -lt 10 ]] && timeout_time=10
local timeout_time=$(date -d "+$timeout_time seconds" +%s)

echo "Waiting for VMs to boot"
notice "Waiting for VMs to boot"
shift
if [[ "$@" == "" ]]; then
local vms_to_check="$VM_BASE_DIR/[0-9]*"
@@ -660,26 +696,26 @@ function vm_wait_for_boot()
for vm in $vms_to_check; do
local vm_num=$(basename $vm)
local i=0
echo "INFO: waiting for VM$vm_num ($vm)"
notice "waiting for VM$vm_num ($vm)"
while ! vm_os_booted $vm_num; do
if ! vm_is_running $vm_num; then
echo
echo "ERROR: VM $vm_num is not running"
echo "================"
echo "QEMU LOG:"

warning "VM $vm_num is not running"
warning "================"
warning "QEMU LOG:"
if [[ -r $vm/qemu.log ]]; then
cat $vm/qemu.log
else
echo "LOG not found"
warning "LOG not found"
fi

echo "VM LOG:"
warning "VM LOG:"
if [[ -r $vm/serial.log ]]; then
cat $vm/serial.log
else
echo "LOG not found"
warning "LOG not found"
fi
echo "================"
warning "================"
return 1
fi

@@ -695,10 +731,10 @@ function vm_wait_for_boot()
sleep 1
done
echo ""
echo "INFO: VM$vm_num ready"
notice "VM$vm_num ready"
done

echo "INFO: all VMs ready"
notice "all VMs ready"
return 0
}

@@ -712,16 +748,16 @@ function vm_start_fio_server()
case "$OPTARG" in
fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
readonly) local readonly="--readonly" ;;
*) echo "Invalid argument '$OPTARG'" && return 1;;
*) error "Invalid argument '$OPTARG'" && return 1;;
esac
;;
*) echo "Invalid argument '$OPTARG'" && return 1;;
*) error "Invalid argument '$OPTARG'" && return 1;;
esac
done

shift $(( OPTIND - 1 ))
for vm_num in $@; do
echo "INFO: Starting fio server on VM$vm_num"
notice "Starting fio server on VM$vm_num"
if [[ $fio_bin != "" ]]; then
cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
vm_ssh $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
@@ -757,7 +793,7 @@ function vm_check_scsi_location()
function vm_reset_scsi_devices()
{
for disk in "${@:2}"; do
echo "INFO: VM$1 Performing device reset on disk $disk"
notice "VM$1 Performing device reset on disk $disk"
vm_ssh $1 sg_reset /dev/$disk -vNd
done
}
@@ -819,14 +855,14 @@ function run_fio()
#
function at_app_exit()
{
echo "INFO: APP EXITING"
echo "INFO: killing all VMs"
notice "APP EXITING"
notice "killing all VMs"
vm_kill_all
# Kill vhost application
echo "INFO: killing vhost app"
notice "killing vhost app"
spdk_vhost_kill

echo "INFO: EXIT DONE"
notice "EXIT DONE"
}

function error_exit()
@@ -834,7 +870,7 @@ function error_exit()
trap - ERR
print_backtrace
set +e
echo "Error on $1 $2"
error "Error on $1 $2"

at_app_exit
exit 1
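The remaining hunks below (the per-test scripts) rely on this machinery through an ERR trap; roughly, each script installs something like the line already visible in the fio test hunk further down:

    trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

so any failing command - including error(), which deliberately returns non-zero - lands in error_exit(): the trap is cleared, print_backtrace runs, all VMs and the vhost app are torn down via at_app_exit(), and the script exits with status 1.
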
@@ -40,12 +40,11 @@ while getopts 'xh-:' optchar; do
done

if [[ $EUID -ne 0 ]]; then
echo "Go away user come back as root"
exit 1
fail "Go away user come back as root"
fi

echo "INFO: $0"
echo
notice "$0"
notice ""

. $COMMON_DIR/common.sh

@@ -36,14 +36,13 @@ done
. $COMMON_DIR/common.sh

if [[ $EUID -ne 0 ]]; then
echo "Go away user come back as root"
exit 1
fail "Go away user come back as root"
fi

if $run_all; then
vm_run -a
else
shift $((OPTIND-1))
echo "INFO: running VMs: $@"
notice "running VMs: $@"
vm_run "$@"
fi

@@ -70,8 +70,7 @@ done
[[ -z "$os" ]] && os="$TEST_DIR/debian.qcow2"
[[ $test_type =~ "spdk_vhost" ]] && [[ -z "$disk" ]] && disk="$SPDK_VHOST_SCSI_TEST_DIR/usvhost"
if [[ $test_type == "kernel_vhost" ]] && [[ -z "$disk" ]]; then
echo "ERROR: for $test_type '--disk=WWN' is mandatory"
exit 1
fail "for $test_type '--disk=WWN' is mandatory"
fi

vm_setup \

@@ -45,22 +45,22 @@ fi

if $all; then
if do_kill; then
echo 'INFO: killing all VMs'
notice "killing all VMs"
vm_kill_all
else
echo 'INFO: shutting down all VMs'
notice "shutting down all VMs"
vm_shutdown_all
fi
else
shift $((OPTIND-1))

if do_kill; then
echo 'INFO: killing VMs: $@'
notice "INFO: killing VMs: $@"
for vm in $@; do
vm_kill $vm
done
else
echo 'INFO: shutting down all VMs'
notice "shutting down all VMs"
vm_shutdown_all
fi
fi

@@ -48,10 +48,9 @@ fi
if $boot_wait; then
while ! vm_os_booted $vm_num; do
if ! vm_is_running $vm_num; then
echo "ERROR: VM$vm_num is not running"
exit 1
fail "VM$vm_num is not running"
fi
echo "INFO: waiting for VM$vm_num to boot"
notice "waiting for VM$vm_num to boot"
sleep 1
done
fi

@@ -70,8 +70,7 @@ done
shift $(( OPTIND - 1 ))

if [[ ! -r "$fio_job" ]]; then
echo "ERROR: no fio job file specified"
exit 1
fail "no fio job file specified"
fi

. $COMMON_DIR/common.sh
@@ -81,18 +80,18 @@ trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
vm_kill_all

if [[ $test_type =~ "spdk_vhost" ]]; then
echo "==============="
echo ""
echo "INFO: running SPDK"
echo ""
notice "==============="
notice ""
notice "running SPDK"
notice ""
$COMMON_DIR/run_vhost.sh $x --work-dir=$TEST_DIR --conf-dir=$BASE_DIR
echo
notice ""
fi

echo "==============="
echo ""
echo "Setting up VM"
echo ""
notice "==============="
notice ""
notice "Setting up VM"
notice ""

rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py "

@@ -100,15 +99,13 @@ for vm_conf in ${vms[@]}; do
IFS=',' read -ra conf <<< "$vm_conf"
setup_cmd="$COMMON_DIR/vm_setup.sh $x --work-dir=$TEST_DIR --test-type=$test_type"
if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
echo "ERROR: invalid VM configuration syntax $vm_conf"
exit 1;
fail "invalid VM configuration syntax $vm_conf"
fi

# Sanity check if VM is not defined twice
for vm_num in $used_vms; do
if [[ $vm_num -eq ${conf[0]} ]]; then
echo "ERROR: VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
exit 1
fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
fi
done
@@ -119,89 +116,77 @@ for vm_conf in ${vms[@]}; do

if [[ $test_type =~ "spdk_vhost" ]]; then

echo "INFO: Adding device via RPC ..."
echo ""
notice "Adding device via RPC ..."

while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
if [[ "$test_type" == "spdk_vhost_blk" ]]; then
disk=${disk%%_*}
echo "INFO: Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
$rpc_py construct_vhost_blk_controller naa.$disk.${conf[0]} $disk
else
echo "INFO: Trying to remove nonexistent controller"
notice "Trying to remove nonexistent controller"
if $rpc_py remove_vhost_controller unk0 > /dev/null; then
echo "ERROR: Removing nonexistent controller succeeded, but it shouldn't"
false
error "Removing nonexistent controller succeeded, but it shouldn't"
fi
echo "INFO: Creating controller naa.$disk.${conf[0]}"
notice "Creating controller naa.$disk.${conf[0]}"
$rpc_py construct_vhost_scsi_controller naa.$disk.${conf[0]}

echo "INFO: Adding initial device (0) to naa.$disk.${conf[0]}"
notice "Adding initial device (0) to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk

echo "INFO: Trying to remove nonexistent device on existing controller"
notice "Trying to remove nonexistent device on existing controller"
if $rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 1 > /dev/null; then
echo "ERROR: Removing nonexistent device (1) from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
error "Removing nonexistent device (1) from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
fi

echo "INFO: Trying to remove existing device from a controller"
notice "Trying to remove existing device from a controller"
$rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 0

echo "INFO: Trying to remove a just-deleted device from a controller again"
notice "Trying to remove a just-deleted device from a controller again"
if $rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 0 > /dev/null; then
echo "ERROR: Removing device 0 from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
false
error "Removing device 0 from controller naa.$disk.${conf[0]} succeeded, but it shouldn't"
fi

echo "INFO: Re-adding device 0 to naa.$disk.${conf[0]}"
notice "Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
fi
done

echo "INFO: Trying to create scsi controller with incorrect cpumask"
notice "Trying to create scsi controller with incorrect cpumask"
if $rpc_py construct_vhost_scsi_controller vhost.invalid.cpumask --cpumask 0x2; then
echo "ERROR: Creating scsi controller with incorrect cpumask succeeded, but it shouldn't"
false
error "Creating scsi controller with incorrect cpumask succeeded, but it shouldn't"
fi

echo "INFO: Trying to remove device from nonexistent scsi controller"
notice "Trying to remove device from nonexistent scsi controller"
if $rpc_py remove_vhost_scsi_target vhost.nonexistent.name 0; then
echo "ERROR: Removing device from nonexistent scsi controller succeeded, but it shouldn't"
false
error "Removing device from nonexistent scsi controller succeeded, but it shouldn't"
fi

echo "INFO: Trying to add device to nonexistent scsi controller"
notice "Trying to add device to nonexistent scsi controller"
if $rpc_py add_vhost_scsi_lun vhost.nonexistent.name 0 Malloc0; then
echo "ERROR: Adding device to nonexistent scsi controller succeeded, but it shouldn't"
false
error "Adding device to nonexistent scsi controller succeeded, but it shouldn't"
fi

echo "INFO: Trying to create scsi controller with incorrect name"
notice "Trying to create scsi controller with incorrect name"
if $rpc_py construct_vhost_scsi_controller .; then
echo "ERROR: Creating scsi controller with incorrect name succeeded, but it shouldn't"
false
error "Creating scsi controller with incorrect name succeeded, but it shouldn't"
fi

echo "INFO: Trying to create block controller with incorrect cpumask"
notice "Trying to create block controller with incorrect cpumask"
if $rpc_py construct_vhost_blk_controller vhost.invalid.cpumask Malloc0 --cpumask 0x2; then
echo "ERROR: Creating block controller with incorrect cpumask succeeded, but it shouldn't"
false
error "Creating block controller with incorrect cpumask succeeded, but it shouldn't"
fi

echo "INFO: Trying to remove nonexistent block controller"
notice "Trying to remove nonexistent block controller"
if $rpc_py remove_vhost_controller vhost.nonexistent.name; then
echo "ERROR: Removing nonexistent block controller succeeded, but it shouldn't"
false
error "Removing nonexistent block controller succeeded, but it shouldn't"
fi

echo "INFO: Trying to create block controller with incorrect name"
notice "Trying to create block controller with incorrect name"
if $rpc_py construct_vhost_blk_controller . Malloc0; then
echo "ERROR: Creating block controller with incorrect name succeeded, but it shouldn't"
false
error "Creating block controller with incorrect name succeeded, but it shouldn't"
fi

done <<< "${conf[2]}"
unset IFS;
$rpc_py get_vhost_controllers
@@ -218,12 +203,12 @@ if [[ $test_type == "spdk_vhost_scsi" ]]; then
IFS=',' read -ra conf <<< "$vm_conf"
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
echo "INFO: Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
$rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 0

sleep 0.1

echo "INFO: Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
done
done <<< "${conf[2]}"
@@ -233,11 +218,11 @@ fi

sleep 0.1

echo "==============="
echo ""
echo "INFO: Testing..."
notice "==============="
notice ""
notice "Testing..."

echo "INFO: Running fio jobs ..."
notice "Running fio jobs ..."

# Check if all VM have disk in tha same location
DISK=""
@@ -249,7 +234,7 @@ for vm_num in $used_vms; do
qemu_mask_param="VM_${vm_num}_qemu_mask"

host_name="VM-$vm_num-${!qemu_mask_param}"
echo "INFO: Setting up hostname: $host_name"
notice "Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $readonly $vm_num

@@ -279,21 +264,21 @@ if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
fi

if ! $no_shutdown; then
echo "==============="
echo "INFO: APP EXITING"
echo "INFO: killing all VMs"
notice "==============="
notice "APP EXITING"
notice "killing all VMs"
vm_shutdown_all
echo "INFO: waiting 2 seconds to let all VMs die"
notice "waiting 2 seconds to let all VMs die"
sleep 2
if [[ $test_type =~ "spdk_vhost" ]]; then
echo "INFO: Removing vhost devices & controllers via RPC ..."
notice "Removing vhost devices & controllers via RPC ..."
for vm_conf in ${vms[@]}; do
IFS=',' read -ra conf <<< "$vm_conf"

while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
disk=${disk%%_*}
echo "INFO: Removing all vhost devices from controller naa.$disk.${conf[0]}"
notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
$rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 0
fi
@@ -303,16 +288,16 @@ if ! $no_shutdown; then
done <<< "${conf[2]}"
done
fi
echo "INFO: Testing done -> shutting down"
echo "INFO: killing vhost app"
notice "Testing done -> shutting down"
notice "killing vhost app"
spdk_vhost_kill

echo "INFO: EXIT DONE"
echo "==============="
notice "EXIT DONE"
notice "==============="
else
echo "==============="
echo
echo "INFO: Leaving environment working!"
echo ""
echo "==============="
notice "==============="
notice ""
notice "Leaving environment working!"
notice ""
notice "==============="
fi
@@ -9,7 +9,6 @@ fio_bin="fio"
fio_jobs="$BASE_DIR/fio_jobs/"
test_type=spdk_vhost_scsi
reuse_vms=false
force_build=false
vms=()
used_vms=""
disk_split=""
@@ -69,23 +68,23 @@ tmp_detach_job=$BASE_DIR/fio_jobs/fio_detach.job.tmp
rpc_py="python $SPDK_BUILD_DIR/scripts/rpc.py "

function print_test_fio_header() {
echo "==============="
echo ""
echo "INFO: Testing..."
notice "==============="
notice ""
notice "Testing..."

echo "INFO: Running fio jobs ..."
notice "Running fio jobs ..."
if [ $# -gt 0 ]; then
echo $1
fi
}

function run_vhost() {
echo "==============="
echo ""
echo "INFO: running SPDK"
echo ""
notice "==============="
notice ""
notice "running SPDK"
notice ""
$BASE_DIR/../common/run_vhost.sh $x --work-dir=$TEST_DIR --conf-dir=$BASE_DIR
echo
notice ""
}

function vms_setup() {
@@ -93,15 +92,13 @@ function vms_setup() {
IFS=',' read -ra conf <<< "$vm_conf"
setup_cmd="$BASE_DIR/../common/vm_setup.sh $x --work-dir=$TEST_DIR --test-type=$test_type"
if [[ x"${conf[0]}" == x"" ]] || ! assert_number ${conf[0]}; then
echo "ERROR: invalid VM configuration syntax $vm_conf"
exit 1;
fail "invalid VM configuration syntax $vm_conf"
fi

# Sanity check if VM is not defined twice
for vm_num in $used_vms; do
if [[ $vm_num -eq ${conf[0]} ]]; then
echo "ERROR: VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
exit 1
fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
fi
done

@@ -128,14 +125,14 @@ function vms_prepare() {
qemu_mask_param="VM_${vm_num}_qemu_mask"

host_name="VM-${vm_num}-${!qemu_mask_param}"
echo "INFO: Setting up hostname: $host_name"
notice "Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
done
}

function vms_reboot_all() {
echo "Rebooting all vms "
notice "Rebooting all vms "
for vm_num in $1; do
vm_ssh $vm_num "reboot" || true
done
@@ -149,18 +146,18 @@ function check_fio_retcode() {
retcode_expected=$2
if [ $retcode_expected == 0 ]; then
if [ $fio_retcode != 0 ]; then
echo " Fio test ended with error."
warning " Fio test ended with error."
vm_shutdown_all
spdk_vhost_kill
exit 1
else
echo " Fio test ended with success."
notice " Fio test ended with success."
fi
else
if [ $fio_retcode != 0 ]; then
echo " Fio test ended with expected error."
notice " Fio test ended with expected error."
else
echo " Fio test ended with unexpected success."
warning " Fio test ended with unexpected success."
vm_shutdown_all
spdk_vhost_kill
exit 1

@@ -25,7 +25,7 @@ function prepare_fio_cmd_tc1() {

# Check if fio test passes on device attached to first controller.
function hotattach_tc1() {
echo "Hotattach test case 1"
notice "Hotattach test case 1"

$rpc_py add_vhost_scsi_lun naa.Nvme0n1p0.0 0 Nvme0n1p0

@@ -38,7 +38,7 @@ function hotattach_tc1() {
# Run fio test for previously attached device.
# During test attach another device to first controller and check fio status.
function hotattach_tc2() {
echo "Hotattach test case 2"
notice "Hotattach test case 2"
prepare_fio_cmd_tc1 "0"

$run_fio &
@@ -52,7 +52,7 @@ function hotattach_tc2() {
# Run fio test for previously attached devices.
# During test attach another device to second controller and check fio status.
function hotattach_tc3() {
echo "Hotattach test case 3"
notice "Hotattach test case 3"
prepare_fio_cmd_tc1 "0"

$run_fio &
@@ -67,7 +67,7 @@ function hotattach_tc3() {
# During test attach another device to third controller(VM2) and check fio status.
# At the end after rebooting VMs run fio test for all devices and check fio status.
function hotattach_tc4() {
echo "Hotattach test case 4"
notice "Hotattach test case 4"

prepare_fio_cmd_tc1 "0"
@@ -14,8 +14,7 @@ function get_first_disk() {

function check_disks() {
if [ "$1" == "$2" ]; then
echo "Disk has not been deleted"
exit 1
fail "Disk has not been deleted"
fi
}

@@ -137,7 +136,7 @@ function prepare_fio_cmd_tc3_iter1() {
# During fio test for all devices remove first device from fifth controller and check if fio fails.
# Also check if disc has been removed from VM.
function hotdetach_tc1() {
echo "Hotdetach test case 1"
notice "Hotdetach test case 1"
first_disk=""
get_first_disk "2" first_disk
prepare_fio_cmd_tc1_iter1 "2 3"
@@ -157,7 +156,7 @@ function hotdetach_tc1() {
# During fio test for device from third VM remove first device from fifth controller and check if fio fails.
# Also check if disc has been removed from VM.
function hotdetach_tc2() {
echo "Hotdetach test case 2"
notice "Hotdetach test case 2"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""
@@ -179,7 +178,7 @@ function hotdetach_tc2() {
# Run fio test for all devices except one, then remove this device and check if fio passes.
# Also check if disc has been removed from VM.
function hotdetach_tc3() {
echo "Hotdetach test case 3"
notice "Hotdetach test case 3"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""
@@ -201,7 +200,7 @@ function hotdetach_tc3() {
# Also check if disc has been removed from VM.
# After reboot run fio test for remaining devices and check if fio passes.
function hotdetach_tc4() {
echo "Hotdetach test case 4"
notice "Hotdetach test case 4"
$SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
sleep 2
first_disk=""

@@ -130,7 +130,7 @@ for bdev in $bdevs; do
if [ $bdev == "Nvme0n1" ]; then
for rw in "${fio_rw[@]}"; do
timing_enter fio_4G_rw_verify
echo "INFO: Running 4G test $rw for disk $bdev"
notice " Running 4G test $rw for disk $bdev"
cp $testdir/../common/fio_jobs/default_initiator.job $testdir/bdev.fio
prepare_fio_job_4G "$rw" "$bdevs"
run_spdk_fio --spdk_conf=$testdir/bdev.conf
@@ -48,28 +48,28 @@ function usage()

function clean_lvol_cfg()
{
echo "INFO: Removing nested lvol bdevs"
notice "Removing nested lvol bdevs"
for lvol_bdev in "${nest_lvol_bdevs[@]}"; do
$rpc_py delete_bdev $lvol_bdev
echo -e "\tINFO: nested lvol bdev $lvol_bdev removed"
notice "nested lvol bdev $lvol_bdev removed"
done

echo "INFO: Removing nested lvol stores"
notice "Removing nested lvol stores"
for lvol_store in "${nest_lvol_stores[@]}"; do
$rpc_py destroy_lvol_store -u $lvol_store
echo -e "\tINFO: nested lvol store $lvol_store removed"
notice "nested lvol store $lvol_store removed"
done

echo "INFO: Removing lvol bdevs"
notice "Removing lvol bdevs"
for lvol_bdev in "${lvol_bdevs[@]}"; do
$rpc_py delete_bdev $lvol_bdev
echo -e "\tINFO: lvol bdev $lvol_bdev removed"
notice "lvol bdev $lvol_bdev removed"
done

echo "INFO: Removing lvol stores"
notice "Removing lvol stores"
for lvol_store in "${lvol_stores[@]}"; do
$rpc_py destroy_lvol_store -u $lvol_store
echo -e "\tINFO: lvol store $lvol_store removed"
notice "lvol store $lvol_store removed"
done
}

@@ -94,7 +94,7 @@ while getopts 'xh-:' optchar; do
esac
done

echo "INFO: Get NVMe disks:"
notice "Get NVMe disks:"
nvmes=($(iter_pci_class_code 01 08 02))

if [[ -z $max_disks ]]; then
@@ -102,9 +102,7 @@ if [[ -z $max_disks ]]; then
fi

if [[ ${#nvmes[@]} -lt max_disks ]]; then
echo -e "ERROR: Number of NVMe drives (${#nvmes[@]})\n\
is lower than number of requested disks for test ($max_disks)"
exit 1
fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
fi

if $distribute_cores; then
@@ -116,10 +114,9 @@ trap 'error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR

vm_kill_all

echo "INFO: running SPDK"
echo ""
notice "running SPDK vhost"
$COMMON_DIR/run_vhost.sh $x --work-dir=$TEST_DIR --conf-dir=$BASE_DIR
echo ""
notice "..."

trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR

@@ -133,7 +130,7 @@ used_vms=""
for (( i=0; i<$max_disks; i++ ));do

# Create base lvol store on NVMe
echo "INFO: Creating lvol store on device Nvme${i}n1"
notice "Creating lvol store on device Nvme${i}n1"
ls_guid=$($rpc_py construct_lvol_store Nvme${i}n1 lvs_$i)
lvol_stores+=("$ls_guid")

@@ -141,15 +138,15 @@ for (( i=0; i<$max_disks; i++ ));do
free_mb=$(get_lvs_free_mb "$ls_guid")
size=$((free_mb / (vm_count+1) ))

echo "INFO: Creating lvol bdev on lvol store: $ls_guid"
notice "Creating lvol bdev on lvol store: $ls_guid"
lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_nest $size)

echo "INFO: Creating nested lvol store on lvol bdev: $lb_name"
notice "Creating nested lvol store on lvol bdev: $lb_name"
nest_ls_guid=$($rpc_py construct_lvol_store $lb_name lvs_n_$i)
nest_lvol_stores+=("$nest_ls_guid")

for (( j=0; j<$vm_count; j++)); do
echo "INFO: Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
free_mb=$(get_lvs_free_mb "$nest_ls_guid")
nest_size=$((free_mb / (vm_count-j) ))
lb_name=$($rpc_py construct_lvol_bdev -u $nest_ls_guid lbd_vm_$j $nest_size)
@@ -159,7 +156,7 @@ for (( i=0; i<$max_disks; i++ ));do

# Create base lvol bdevs
for (( j=0; j<$vm_count; j++)); do
echo "INFO: Creating lvol bdev for VM $i on lvol store $ls_guid"
notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
free_mb=$(get_lvs_free_mb "$ls_guid")
size=$((free_mb / (vm_count-j) ))
lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_vm_$j $size)
@@ -168,7 +165,7 @@ for (( i=0; i<$max_disks; i++ ));do
done

bdev_info=$($rpc_py get_bdevs)
echo "INFO: Configuration after initial set-up:"
notice "Configuration after initial set-up:"
$rpc_py get_lvol_stores
echo "$bdev_info"

@@ -248,27 +245,27 @@ done
# Run FIO traffic
run_fio $fio_bin --job-file=$COMMON_DIR/fio_jobs/default_integrity.job --out="$TEST_DIR/fio_results" $fio_disks

echo "INFO: Shutting down virtual machines..."
notice "Shutting down virtual machines..."
vm_shutdown_all
sleep 2

echo "INFO: Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
notice "Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
if [[ "$ctrl_type" == "vhost_scsi" ]]; then
for (( i=0; i<$vm_count; i++)); do
echo "INFO: Removing devices from vhost SCSI controller naa.0.$i"
notice "Removing devices from vhost SCSI controller naa.0.$i"
for (( j=0; j<${#bdevs[@]}; j++)); do
$rpc_py remove_vhost_scsi_target naa.0.$i $j
echo -e "\tINFO: Removed device $j"
notice "Removed device $j"
done
echo "Removing vhost SCSI controller naa.0.$i"
notice "Removing vhost SCSI controller naa.0.$i"
$rpc_py remove_vhost_controller naa.0.$i
done
elif [[ "$ctrl_type" == "vhost_blk" ]]; then
for (( i=0; i<$vm_count; i++)); do
for (( j=0; j<${#bdevs[@]}; j++)); do
echo "INFO: Removing vhost BLK controller naa.$j.$i"
notice "Removing vhost BLK controller naa.$j.$i"
$rpc_py remove_vhost_controller naa.$j.$i
echo -e "\tINFO: Removed naa.$j.$i"
notice "Removed naa.$j.$i"
done
done
fi
@@ -280,5 +277,5 @@ $rpc_py get_bdevs
$rpc_py get_vhost_controllers
$rpc_py get_luns

echo "INFO: Shutting down SPDK vhost app..."
notice "Shutting down SPDK vhost app..."
spdk_vhost_kill
@@ -40,23 +40,21 @@ trap error_exit ERR

VHOST_APP="$SPDK_BUILD_DIR/app/vhost/vhost"

echo "INFO: Testing vhost command line arguments"
notice "Testing vhost command line arguments"
# Printing help will force vhost to exit without error
$VHOST_APP -c /path/to/non_existing_file/conf -S $BASE_DIR -e 0x0 -s 1024 -d -q -h

# Testing vhost create pid file option. Vhost will exit with error as invalid config path is given
if $VHOST_APP -c /path/to/non_existing_file/conf -f $SPDK_VHOST_SCSI_TEST_DIR/vhost.pid; then
echo "vhost started when specifying invalid config file"
exit 1
fail "vhost started when specifying invalid config file"
fi

# Expecting vhost to fail if an incorrect argument is given
if $VHOST_APP -x -h; then
echo "vhost started with invalid -x command line option"
exit 1
fail "vhost started with invalid -x command line option"
fi

# Passing trace flags if spdk is build without CONFIG_DEBUG=y option make vhost exit with error
if ! $VHOST_APP -t vhost_scsi -h; then
echo "vhost did not started with trace flags enabled but ignoring this as it might not be a debug build"
warning "vhost did not started with trace flags enabled but ignoring this as it might not be a debug build"
fi

@@ -45,20 +45,19 @@ while getopts 'xh-:' optchar; do
esac
done

if [[ $EUID -ne 0 ]]; then
echo "INFO: Go away user come back as root"
exit 1
fi

. $COMMON_DIR/common.sh
trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR

if [[ $EUID -ne 0 ]]; then
fail "Go away user come back as root"
fi

function print_tc_name()
{
echo ""
echo "==============================================================="
echo "Now running: $1"
echo "==============================================================="
notice ""
notice "==============================================================="
notice "Now running: $1"
notice "==============================================================="
}

function blk_ro_tc1()
@@ -72,19 +71,19 @@ function blk_ro_tc1()
if [[ $disk =~ .*malloc.* ]]; then
disk_name=$($rpc_py construct_malloc_bdev 512 4096)
if [ $? != 0 ]; then
error "Failed to create malloc bdev"
fail "Failed to create malloc bdev"
fi

disk=$disk_name"_size_512M"
else
disk_name=${disk%%_*}
if ! $rpc_py get_bdevs | jq -r '.[] .name' | grep -qi $disk_name$; then
error "$disk_name bdev not found!"
fail "$disk_name bdev not found!"
fi
fi

#Create controller and create file on disk for later test
echo "INFO: Creating vhost_blk controller"
notice "Creating vhost_blk controller"
vhost_blk_name="naa.$disk_name.$vm_no"
$rpc_py construct_vhost_blk_controller $vhost_blk_name $disk_name
setup_cmd="$COMMON_DIR/vm_setup.sh $x --work-dir=$TEST_DIR --test-type=spdk_vhost_blk"
@@ -95,19 +94,19 @@ function blk_ro_tc1()

$COMMON_DIR/vm_run.sh $x --work-dir=$TEST_DIR $vm_no
vm_wait_for_boot 600 $vm_no
echo "INFO: Prepearing partition and file on guest VM"
notice "Prepearing partition and file on guest VM"
vm_ssh $vm_no "bash -s" < $BASE_DIR/disabled_readonly_vm.sh
sleep 1

vm_shutdown_all
#Create readonly controller and test readonly featchure
echo "INFO: Removing controller and creating new one with readonly flag"
notice "Removing controller and creating new one with readonly flag"
$rpc_py remove_vhost_controller $vhost_blk_name
$rpc_py construct_vhost_blk_controller -r $vhost_blk_name $disk_name

$COMMON_DIR/vm_run.sh $x --work-dir=$TEST_DIR $vm_no
vm_wait_for_boot 600 $vm_no
echo "INFO: Testing readonly feature on guest VM"
notice "Testing readonly feature on guest VM"
vm_ssh $vm_no "bash -s" < $BASE_DIR/enabled_readonly_vm.sh
sleep 1

@@ -119,7 +118,7 @@ function blk_ro_tc1()

$COMMON_DIR/vm_run.sh $x --work-dir=$TEST_DIR $vm_no
vm_wait_for_boot 600 $vm_no
echo "INFO: removing partition and file from test disk on guest VM"
notice "removing partition and file from test disk on guest VM"
vm_ssh $vm_no "bash -s" < $BASE_DIR/delete_partition_vm.sh
sleep 1