test/vhost: Drop support for multiple nvme controllers

This test path is currently not being run through CI, hence no need to support it.

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I9a7cc7005c4e20f25724d2a0a417656deb09abb1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6543
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 3369ef457f
commit e0335bb121
@@ -50,8 +50,7 @@ iscsi_tgt_fuzz
 nvmf_fuzz
 nvmf_multiconnection
 nvmf_initiator_timeout
-vhost_blk_2core_2ctrl
-vhost_blk_1core_2ctrl
+vhost_blk_cores_2ctrl
 vhost_blk_fs_integrity
 vhost_blk_integrity
 vhost_blk_nightly
@@ -63,6 +62,4 @@ vhost_readonly
 vhost_scsi_fs_integrity
 vhost_scsi_integrity
 vhost_scsi_nightly
-vhost_scsi_2core_2ctrl
-vhost_scsi_1core_2ctrl
-vhost_scsi_1core_1ctrl
+vhost_scsi_cores_2ctrl
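The renames drop the hardcoded core/disk counts from the test names now that only a single NVMe controller is exercised: vhost_blk_2core_2ctrl becomes vhost_blk_cores_2ctrl, vhost_scsi_2core_2ctrl becomes vhost_scsi_cores_2ctrl, and the vhost_blk_1core_2ctrl, vhost_scsi_1core_2ctrl and vhost_scsi_1core_1ctrl entries are retired together with the multi-disk variants that backed them.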
@@ -9,7 +9,6 @@ source $rootdir/scripts/common.sh
 rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
 
 vm_count=1
-max_disks=""
 ctrl_type="spdk_vhost_scsi"
 use_fs=false
 nested_lvol=false
@@ -28,8 +27,6 @@ function usage() {
 	echo "    --vm-count=INT         Virtual machines to use in test;"
 	echo "                           Each VM will get one lvol bdev on each NVMe."
 	echo "                           Default: 1"
-	echo "    --max-disks=INT        Maximum number of NVMe drives to use in test."
-	echo "                           Default: will use all available NVMes."
 	echo "    --ctrl-type=TYPE       Controller type to use for test:"
 	echo "                           spdk_vhost_scsi - use spdk vhost scsi"
 	echo "                           spdk_vhost_blk - use spdk vhost block"
@@ -67,10 +64,8 @@ function clean_lvol_cfg() {
 	done
 
 	notice "Removing lvol stores"
-	for lvol_store in "${lvol_stores[@]}"; do
-		$rpc_py bdev_lvol_delete_lvstore -u $lvol_store
-		notice "lvol store $lvol_store removed"
-	done
+	$rpc_py bdev_lvol_delete_lvstore -u "$ls_guid"
+	notice "lvol store $ls_guid removed"
 }
 
 while getopts 'xh-:' optchar; do
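With a single base lvol store there is no GUID array to iterate; the store created later in the script is addressed directly through $ls_guid. For orientation, a condensed sketch of the resulting cleanup order, assuming the per-VM lvol bdevs are deleted with the bdev_lvol_delete RPC (that loop ends at the `done` shown in context above and is not part of this hunk):

for lvol_bdev in "${lvol_bdevs[@]}"; do
	$rpc_py bdev_lvol_delete "$lvol_bdev" # assumed RPC; remove the per-VM lvol bdevs first
done
$rpc_py bdev_lvol_delete_lvstore -u "$ls_guid" # then drop the one base store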
@@ -80,7 +75,6 @@ while getopts 'xh-:' optchar; do
 			help) usage $0 ;;
 			fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
 			vm-count=*) vm_count="${OPTARG#*=}" ;;
-			max-disks=*) max_disks="${OPTARG#*=}" ;;
 			ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
 			nested-lvol) nested_lvol=true ;;
 			distribute-cores) distribute_cores=true ;;
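Only the max-disks=* arm is dropped; the dispatch around it relies on a common getopts idiom worth spelling out: with the option string 'xh-:', getopts treats '-' as an option whose argument is the entire "name=value" token, and ${OPTARG#*=} strips the shortest prefix ending in '='. A minimal standalone sketch of the idiom (hypothetical script, not part of the diff):

#!/usr/bin/env bash
vm_count=1
while getopts 'xh-:' optchar; do
	case "$optchar" in
		-) # long option: OPTARG holds "name=value"
			case "$OPTARG" in
				vm-count=*) vm_count="${OPTARG#*=}" ;; # "--vm-count=4" -> "4"
			esac
			;;
		x) set -x ;;
	esac
done
echo "vm_count=$vm_count" # ./sketch.sh --vm-count=4 prints vm_count=4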
@@ -99,17 +93,6 @@ done
 
 vhosttestinit
 
-notice "Get NVMe disks:"
-nvmes=($(get_nvme_bdfs))
-
-if [[ -z $max_disks ]]; then
-	max_disks=${#nvmes[@]}
-fi
-
-if ((${#nvmes[@]} < max_disks)); then
-	fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
-fi
-
 if $distribute_cores; then
 	# FIXME: this need to be handled entirely in common.sh
 	source $testdir/autotest.config
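The deleted block was the only consumer of get_nvme_bdfs (a helper from scripts/common.sh, sourced at the top of this script per the first hunk header) and of the max_disks guard. For reference, a minimal sketch of what that enumeration yields; the sample address is illustrative:

nvmes=($(get_nvme_bdfs)) # PCI addresses of the NVMe drives, e.g. 0000:3b:00.0
echo "found ${#nvmes[@]} NVMe drive(s): ${nvmes[*]}"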
@@ -125,21 +108,16 @@ notice "..."
 
 trap 'clean_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' SIGTERM SIGABRT ERR
 
-lvol_stores=()
 lvol_bdevs=()
 nest_lvol_stores=()
 nest_lvol_bdevs=()
 used_vms=""
 
-# On each NVMe create one lvol store
-for ((i = 0; i < max_disks; i++)); do
-
-	# Create base lvol store on NVMe
-	notice "Creating lvol store on device Nvme${i}n1"
-	ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme${i}n1 lvs_$i -c 4194304)
-	lvol_stores+=("$ls_guid")
-
-	if $nested_lvol; then
+id=0
+# Create base lvol store on NVMe
+notice "Creating lvol store on device Nvme${id}n1"
+ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme0n1 lvs_$id -c 4194304)
+if $nested_lvol; then
 	free_mb=$(get_lvs_free_mb "$ls_guid")
 	size=$((free_mb / (vm_count + 1)))
 
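In the nested case the vm_count + 1 divisor reserves one equal share of the base store for the lbd_nest bdev that hosts the nested lvol store (created in the next hunk), leaving vm_count shares for the per-VM bdevs. Worked numbers with a hypothetical free_mb:

# free_mb=9000, vm_count=2
size=$((9000 / (2 + 1))) # 3000 MB for lbd_nest, 6000 MB left for the 2 VM bdevs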
@@ -147,26 +125,25 @@ for ((i = 0; i < max_disks; i++)); do
 	lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_nest $size $thin)
 
 	notice "Creating nested lvol store on lvol bdev: $lb_name"
-	nest_ls_guid=$($rpc_py bdev_lvol_create_lvstore $lb_name lvs_n_$i -c 4194304)
+	nest_ls_guid=$($rpc_py bdev_lvol_create_lvstore $lb_name lvs_n_$id -c 4194304)
 	nest_lvol_stores+=("$nest_ls_guid")
 
 	for ((j = 0; j < vm_count; j++)); do
-		notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
+		notice "Creating nested lvol bdev for VM $id on lvol store $nest_ls_guid"
 		free_mb=$(get_lvs_free_mb "$nest_ls_guid")
 		nest_size=$((free_mb / (vm_count - j)))
 		lb_name=$($rpc_py bdev_lvol_create -u $nest_ls_guid lbd_vm_$j $nest_size $thin)
 		nest_lvol_bdevs+=("$lb_name")
 	done
 fi
 
 # Create base lvol bdevs
 for ((j = 0; j < vm_count; j++)); do
-	notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
+	notice "Creating lvol bdev for VM $id on lvol store $ls_guid"
 	free_mb=$(get_lvs_free_mb "$ls_guid")
 	size=$((free_mb / (vm_count - j)))
 	lb_name=$($rpc_py bdev_lvol_create -u $ls_guid lbd_vm_$j $size $thin)
 	lvol_bdevs+=("$lb_name")
 done
-done
 
 bdev_info=$($rpc_py bdev_get_bdevs)
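The vm_count - j divisor implements a progressive even split: each iteration re-reads the store's free space and takes 1/(number of VMs still to serve), so every VM receives roughly the same share. Worked numbers, with a hypothetical 6000 MB free and vm_count=2:

# j=0: free_mb=6000, size=6000/(2-0)=3000 -> lbd_vm_0 gets 3000 MB, 3000 MB remain
# j=1: free_mb=3000, size=3000/(2-1)=3000 -> lbd_vm_1 gets the rest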
@@ -44,8 +44,6 @@ if [[ ! -r "${VM_IMAGE}" ]]; then
 	exit 1
 fi
 
-DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
-
 WORKDIR=$(readlink -f $(dirname $0))
 
 case $1 in
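The dropped DISKS_NUMBER pipeline (removed here and again in the next hunk) counted NVMe controllers: lspci -mm -n prints one machine-readable record per PCI device with quoted numeric fields, and class code 0108 is mass storage (01), subclass NVM (08). A trimmed sketch; the sample record is illustrative:

# sample record: 3b:00.0 "0108" "8086" "0a54" ...
lspci -mm -n | grep 0108 | wc -l # count of NVMe controllers present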
@@ -23,8 +23,6 @@ if [[ ! -r "${VM_IMAGE}" ]]; then
 	exit 1
 fi
 
-DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"$1}' | wc -l)
-
 WORKDIR=$(readlink -f $(dirname $0))
 
 run_test "vhost_negative" $WORKDIR/other/negative.sh
@@ -50,31 +48,13 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
 	echo 'Running filesystem integrity suite with BLK...'
 	run_test "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
 
-	if [[ $DISKS_NUMBER -ge 2 ]]; then
-		echo 'Running lvol integrity nightly suite with two cores and two controllers'
-		run_test "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-			--ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
+	echo 'Running lvol integrity nightly suite with multiple cores and two vhost controllers (vhost_scsi)'
+	run_test "vhost_scsi_cores_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		--ctrl-type=spdk_vhost_scsi --distribute-cores --vm-count=2
 
-		echo 'Running lvol integrity nightly suite with one core and two controllers'
-		run_test "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-			--ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
-	fi
-
-	echo 'Running lvol integrity nightly suite with one core and one controller'
-	run_test "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-		--ctrl-type=spdk_vhost_scsi --max-disks=1
-
-	if [[ $DISKS_NUMBER -ge 2 ]]; then
-		echo 'Running lvol integrity nightly suite with two cores and two controllers'
-		run_test "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-			--ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
-
-		echo 'Running lvol integrity nightly suite with one core and two controllers'
-		run_test "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-			--ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
-	fi
-
-	echo 'Running lvol integrity nightly suite with one core and one controller'
-	run_test "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
-		--ctrl-type=spdk_vhost_blk --max-disks=1
+	echo 'Running lvol integrity nightly suite with multiple cores and two vhost controllers (vhost_blk)'
+	run_test "vhost_blk_cores_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+		--ctrl-type=spdk_vhost_blk --distribute-cores --vm-count=2
 
 	echo 'Running readonly tests suite...'
 	run_test "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
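The nightly lvol matrix thus shrinks from five DISKS_NUMBER-gated runs to two unconditional ones, one per controller type, each driving two VMs and two vhost controllers off the single Nvme0n1 device. A sketch of kicking off the nightly set, assuming the runner is test/vhost/vhost.sh as the $WORKDIR references suggest, with an illustrative fio path:

RUN_NIGHTLY=1 FIO_BIN=/usr/src/fio/fio test/vhost/vhost.sh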