test/vhost: add live migration test case 2
Change-Id: I114c11d9f7c1e9a5a8c0a989541a41b8747cc125
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-on: https://review.gerrithub.io/398753
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
commit 6ebfbf7351 (parent 90922c60df)
@@ -112,6 +112,7 @@ function spdk_vhost_run()
 	local param
 	local vhost_num=0
 	local vhost_conf_path=""
+	local memory=1024

 	for param in "$@"; do
 		case $param in
@@ -120,6 +121,7 @@ function spdk_vhost_run()
 				assert_number "$vhost_num"
 				;;
 			--conf-path=*) local vhost_conf_path="${param#*=}" ;;
+			--memory=*) local memory=${param#*=} ;;
 			*)
 				error "Invalid parameter '$param'"
 				return 1
@@ -127,7 +129,6 @@ function spdk_vhost_run()
 		esac
 	done

-
 	local vhost_dir="$(get_vhost_dir $vhost_num)"
 	if [[ -z "$vhost_conf_path" ]]; then
 		error "Missing mandatory parameter '--conf-path'"
@@ -163,13 +164,14 @@ function spdk_vhost_run()
 	cp $vhost_conf_template $vhost_conf_file
 	$SPDK_BUILD_DIR/scripts/gen_nvme.sh >> $vhost_conf_file

-	local cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -r $vhost_dir/rpc.sock"
+	local cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -s $memory -r $vhost_dir/rpc.sock"

 	notice "Loging to: $vhost_log_file"
 	notice "Config file: $vhost_conf_file"
 	notice "Socket: $vhost_socket"
 	notice "Command: $cmd"

+	timing_enter vhost_start
 	cd $vhost_dir; $cmd &
 	vhost_pid=$!
 	echo $vhost_pid > $vhost_pid_file
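The new --memory option is forwarded to the vhost application as its -s argument (hugepage memory size in MB), so each vhost instance's memory footprint can now be capped by the caller. A minimal usage sketch, assuming the common vhost test scripts are already sourced and $BASE_DIR holds the test's vhost config template (this is exactly how the new test case below starts its two instances):

# Start two vhost instances, 512 MB of hugepage memory each; instance 1
# picks up its reactor mask and master core from the vhost_1_* globals.
spdk_vhost_run --conf-path=$BASE_DIR --memory=512 --vhost-num=0
vhost_1_reactor_mask=0x2
vhost_1_master_core=1
spdk_vhost_run --conf-path=$BASE_DIR --memory=512 --vhost-num=1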
@@ -177,12 +179,14 @@ function spdk_vhost_run()
 	notice "waiting for app to run..."
 	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
 	notice "vhost started - pid=$vhost_pid"
+	timing_exit vhost_start

 	rm $vhost_conf_file
 }

 function spdk_vhost_kill()
 {
+	local rc=0
 	local vhost_num=0
 	if [[ ! -z "$1" ]]; then
 		vhost_num=$1
@@ -196,6 +200,7 @@ function spdk_vhost_kill()
 		return 0
 	fi

+	timing_enter vhost_kill
 	local vhost_pid="$(cat $vhost_pid_file)"
 	notice "killing vhost (PID $vhost_pid) app"

@@ -213,19 +218,24 @@ function spdk_vhost_kill()
 			error "ERROR: vhost was NOT killed - sending SIGABRT"
 			/bin/kill -ABRT $vhost_pid
 			rm $vhost_pid_file
-			return 1
+			rc=1
+		else
+			#check vhost return code, activate trap on error
+			wait $vhost_pid
 		fi
-
-		#check vhost return code, activate trap on error
-		wait $vhost_pid
 	elif /bin/kill -0 $vhost_pid; then
 		error "vhost NOT killed - you need to kill it manually"
-		return 1
+		rc=1
 	else
 		notice "vhost was no running"
 	fi

-	rm $vhost_pid_file
+	timing_exit vhost_kill
+	if [[ $rc == 0 ]]; then
+		rm $vhost_pid_file
+	fi
+
+	return $rc
 }

 ###
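With this rework spdk_vhost_kill collects failures in a local rc, removes the pid file only on success, and returns rc instead of bailing out of the function early. A hedged sketch of how a caller could now react to a failed shutdown (hypothetical caller, using the fail helper already present in these tests):

# Propagate a failed shutdown of vhost instance 1 as a test failure.
if ! spdk_vhost_kill 1; then
	fail "vhost instance 1 did not shut down cleanly"
fi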
test/vhost/migration/migration-tc2.job (new file, 20 lines):

[global]
blocksize_range=4k-512k
iodepth=128
ioengine=libaio
filename=
group_reporting
thread
numjobs=1
direct=1
do_verify=1
verify=md5
verify_fatal=1
verify_dump=1
verify_backlog=8

[randwrite]
rw=randwrite
runtime=15
time_based
stonewall
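The job file describes a 15-second, time-based random-write workload with md5 verification; filename= is left empty on purpose and is filled in per VM by the run_fio helper used later in the test. Purely as an illustration (the device path is a placeholder for whatever SCSI disk the vhost controller exposes inside the guest), the equivalent standalone invocation would be:

# /dev/sdb stands in for the guest's vhost-scsi disk.
fio --filename=/dev/sdb /root/migration-tc2.job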
test/vhost/migration/migration-tc2.sh (new file, 207 lines):

source $SPDK_BUILD_DIR/test/nvmf/common.sh

function migration_tc2_cleanup_nvmf_tgt()
{
	local i

	if [[ ! -r "$nvmf_dir/nvmf_tgt.pid" ]]; then
		warning "Pid file '$nvmf_dir/nvmf_tgt.pid' does not exist."
		return
	fi

	if [[ ! -z "$1" ]]; then
		trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
		pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid || true
		sleep 5
		if ! pkill -F $nvmf_dir/nvmf_tgt.pid; then
			fail "failed to kill nvmf_tgt app"
		fi
	else
		pkill --signal SIGTERM -F $nvmf_dir/nvmf_tgt.pid || true
		for (( i=0; i<20; i++ )); do
			if ! pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
				break
			fi
			sleep 0.5
		done

		if pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
			error "nvmf_tgt failed to shutdown"
		fi
	fi

	rm $nvmf_dir/nvmf_tgt.pid
	unset -v nvmf_dir rpc_nvmf
}

function migration_tc2_cleanup_vhost_config()
{
	timing_enter migration_tc2_cleanup_vhost_config

	trap 'migration_tc2_cleanup_nvmf_tgt SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Delete bdev first to remove all LUNs and SCSI targets
	$rpc_0 delete_bdev Nvme0n1
	$rpc_0 remove_vhost_controller $incoming_vm_ctrlr

	$rpc_1 delete_bdev Nvme0n1
	$rpc_1 remove_vhost_controller $target_vm_ctrlr

	notice "killing vhost app"
	spdk_vhost_kill 0
	spdk_vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}

function migration_tc2_configure_vhost()
{
	timing_enter migration_tc2_configure_vhost

	# These are intentionally global - they will be unset in the cleanup handler
	nvmf_dir="$TEST_DIR/nvmf_tgt"

	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	rpc_nvmf="python $SPDK_BUILD_DIR/scripts/rpc.py -s $nvmf_dir/rpc.sock"
	rpc_0="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="python $SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

	# The default cleanup/error handlers will not shut down the nvmf_tgt app,
	# so set it up here and tear it down in the cleanup function
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

	# Run nvmf_tgt and two vhost instances:
	# nvmf_tgt uses core id 2 (-m 0x4)
	# First vhost uses core id 0 (vhost_0_reactor_mask=0x1)
	# Second vhost uses core id 1 (vhost_1_reactor_mask=0x2)
	# This forces the test to use VMs 1 and 2.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	mkdir -p $nvmf_dir
	rm -f $nvmf_dir/*
	cp $SPDK_BUILD_DIR/test/nvmf/nvmf.conf $nvmf_dir/nvmf.conf
	$SPDK_BUILD_DIR/scripts/gen_nvme.sh >> $nvmf_dir/nvmf.conf
	$SPDK_BUILD_DIR/app/nvmf_tgt/nvmf_tgt -s 512 -m 0x4 -c $nvmf_dir/nvmf.conf -r $nvmf_dir/rpc.sock &
	local nvmf_tgt_pid=$!
	echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
	waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
	timing_exit start_nvmf_tgt

	spdk_vhost_run --conf-path=$BASE_DIR --memory=512 --vhost-num=0
	# These are intentionally global
	vhost_1_reactor_mask=0x2
	vhost_1_master_core=1
	spdk_vhost_run --conf-path=$BASE_DIR --memory=512 --vhost-num=1

	local rdma_ip_list=$(get_available_rdma_ips)
	local nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	$rpc_nvmf construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode1 \
		"trtype:RDMA traddr:$nvmf_target_ip trsvcid:4420" "" -a -s SPDK00000000000001 -n Nvme0n1

	$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
	$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1

	$rpc_1 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 construct_vhost_scsi_controller $target_vm_ctrlr
	$rpc_1 add_vhost_scsi_lun $target_vm_ctrlr 0 Nvme0n1

	notice "Setting up VMs"
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-num=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-num=1

	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for the incoming VM, as the target is waiting for migration
	vm_wait_for_boot 600 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}

function migration_tc2_error_cleanup()
{
	trap - SIGINT ERR EXIT
	set -x

	vm_kill_all
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}

function migration_tc2()
{
	# Use 2 VMs:
	# incoming VM - the one we want to migrate
	# target VM - the one which will accept the migration
	local job_file="$BASE_DIR/migration-tc2.job"

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
	vm_check_scsi_location $incoming_vm
	run_fio $fio_bin --job-file="$job_file" --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"

	# Wait a while to let fio issue some IO
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_ssh $incoming_vm "cat /root/$(basename ${job_file}).out"
		error "FIO is not running before migration: process crashed or finished too early"
	fi

	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=40
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		if (( timeout-- == 0 )); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}

migration_tc2
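Since migration.sh simply sources migration-tc${test_case}.sh for each requested case, test case 2 can also be run on its own through the same wrapper. A sketch, with placeholder paths for the fio binary and VM image:

# Run only migration test case 2 with xtrace enabled.
./test/vhost/migration/migration.sh -x --fio-bin=/usr/src/fio/fio \
	--os=/path/to/vm_image.qcow2 --test-cases=2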
@@ -76,6 +76,7 @@ function vm_migrate()
 		fail "source VM $1 or destination VM is not properly configured for live migration"
 	fi

+	timing_enter vm_migrate
 	notice "Migrating VM $1 to VM "$(basename $target_vm_dir)
 	echo -e \
 		"migrate_set_speed 1g\n" \
@@ -95,6 +96,7 @@ function vm_migrate()
 	fi

 	notice "Migration complete"
+	timing_exit vm_migrate
 }

 function is_fio_running()
@@ -120,7 +122,10 @@ for test_case in ${test_cases//,/ }; do
 	notice "==============================="
 	notice "Running Migration test case ${test_case}"
 	notice "==============================="
+
+	timing_enter migration-tc${test_case}
 	source $BASE_DIR/migration-tc${test_case}.sh
+	timing_exit migration-tc${test_case}
 done

 notice "Migration Test SUCCESS"
@@ -85,7 +85,7 @@ case $1 in
 	-m|--migration)
 		echo 'Running migration suite...'
 		$WORKDIR/migration/migration.sh -x \
-			--fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1
+			--fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1,2
 		;;
 	-i|--integrity)
 		echo 'Running SCSI integrity suite...'