test/vhost: Add no-pci option and fix vhost live migration tc2 & tc3
Adds an option to disable PCI access in the spdk_vhost_run function. This is particularly required in vhost live migration tc2 & tc3.

Change-Id: I5ec215d316b97dbabd1297856d8f2dfbf7b9d3cb
Signed-off-by: Pawel Niedzwiecki <pawelx.niedzwiecki@intel.com>
Reviewed-on: https://review.gerrithub.io/413658
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent
b6f90c527a
commit
c25adb8444
@ -123,6 +123,7 @@ function spdk_vhost_run()
|
||||
--conf-path=*) local vhost_conf_path="${param#*=}" ;;
|
||||
--json-path=*) local vhost_json_path="${param#*=}" ;;
|
||||
--memory=*) local memory=${param#*=} ;;
|
||||
--no-pci*) local no_pci="-u" ;;
|
||||
*)
|
||||
error "Invalid parameter '$param'"
|
||||
return 1
|
||||
@ -158,11 +159,11 @@ function spdk_vhost_run()
|
||||
return 1
|
||||
fi
|
||||
|
||||
local cmd="$vhost_app -m $reactor_mask -p $master_core -s $memory -r $vhost_dir/rpc.sock"
|
||||
local cmd="$vhost_app -m $reactor_mask -p $master_core -s $memory -r $vhost_dir/rpc.sock $no_pci"
|
||||
if [[ -n "$vhost_conf_path" ]]; then
|
||||
cp $vhost_conf_template $vhost_conf_file
|
||||
$SPDK_BUILD_DIR/scripts/gen_nvme.sh >> $vhost_conf_file
|
||||
cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -s $memory -r $vhost_dir/rpc.sock"
|
||||
cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -s $memory -r $vhost_dir/rpc.sock $no_pci"
|
||||
fi
|
||||
|
||||
notice "Loging to: $vhost_log_file"
|
||||
@ -176,7 +177,8 @@ function spdk_vhost_run()
|
||||
|
||||
notice "waiting for app to run..."
|
||||
waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
|
||||
if [[ -z "$vhost_conf_path" ]]; then
|
||||
#do not generate nvmes if pci access is disabled
|
||||
if [[ -z "$vhost_conf_path" ]] && [[ -z "$no_pci" ]]; then
|
||||
$SPDK_BUILD_DIR/scripts/gen_nvme.sh "--json" | $SPDK_BUILD_DIR/scripts/rpc.py\
|
||||
-s $vhost_dir/rpc.sock load_subsystem_config
|
||||
fi
|
||||
|
@ -101,11 +101,11 @@ function migration_tc2_configure_vhost()
|
||||
waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
|
||||
timing_exit start_nvmf_tgt
|
||||
|
||||
spdk_vhost_run --memory=512 --vhost-num=0
|
||||
spdk_vhost_run --memory=512 --vhost-num=0 --no-pci
|
||||
# Those are global intentionally
|
||||
vhost_1_reactor_mask=0x2
|
||||
vhost_1_master_core=1
|
||||
spdk_vhost_run --memory=512 --vhost-num=1
|
||||
spdk_vhost_run --memory=512 --vhost-num=1 --no-pci
|
||||
|
||||
local rdma_ip_list=$(get_available_rdma_ips)
|
||||
local nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)
|
||||
|
@ -118,7 +118,7 @@ function host1_start_vhost()
|
||||
|
||||
notice "Starting vhost0 instance on local server"
|
||||
trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
|
||||
spdk_vhost_run --vhost-num=0
|
||||
spdk_vhost_run --vhost-num=0 --no-pci
|
||||
$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
|
||||
$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
|
||||
$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1
|
||||
|
@ -33,7 +33,7 @@ function host_2_start_vhost()
|
||||
|
||||
notice "Starting vhost 1 instance on remote server"
|
||||
trap 'host_2_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
|
||||
spdk_vhost_run --vhost-num=1
|
||||
spdk_vhost_run --vhost-num=1 --no-pci
|
||||
|
||||
$rpc construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
|
||||
$rpc construct_vhost_scsi_controller $target_vm_ctrl
|
||||
|
Loading…
Reference in New Issue
Block a user