test: refactor the run_test function to add detailed information

1. Refactor the run_test function to print detailed information
   when running test suites and test cases.
2. Refactor the lvol feature test scripts so that their logs are consistent.
3. Users can use "run_test suite <command>" to run test suites
   and "run_test case <command>" to run test cases.
4. Update the vhost and lvol tests as examples.

Change-Id: I7b6387019a861bd1c4f89b9a7712e53150aea8fa
Signed-off-by: Chen Wang <chenx.wang@intel.com>
Reviewed-on: https://review.gerrithub.io/403610
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Chen Wang 2018-03-13 13:57:41 +08:00 committed by Ben Walker
parent 57a5c6020b
commit 1f0bff73df
7 changed files with 101 additions and 104 deletions

View File

@ -85,7 +85,7 @@ timing_exit nvmf_setup
if [ $SPDK_TEST_UNITTEST -eq 1 ]; then if [ $SPDK_TEST_UNITTEST -eq 1 ]; then
timing_enter unittest timing_enter unittest
run_test ./test/unit/unittest.sh run_test suite ./test/unit/unittest.sh
report_test_completion "unittest" report_test_completion "unittest"
timing_exit unittest timing_exit unittest
fi fi
@ -93,109 +93,109 @@ fi
timing_enter lib timing_enter lib
if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
run_test test/bdev/blockdev.sh run_test suite test/bdev/blockdev.sh
if [ $(uname -s) = Linux ]; then if [ $(uname -s) = Linux ]; then
run_test test/bdev/bdevjson/json_config.sh run_test suite test/bdev/bdevjson/json_config.sh
if modprobe -n nbd; then if modprobe -n nbd; then
run_test test/bdev/nbdjson/json_config.sh run_test suite test/bdev/nbdjson/json_config.sh
fi fi
fi fi
fi fi
if [ $SPDK_TEST_EVENT -eq 1 ]; then if [ $SPDK_TEST_EVENT -eq 1 ]; then
run_test test/event/event.sh run_test suite test/event/event.sh
fi fi
if [ $SPDK_TEST_NVME -eq 1 ]; then if [ $SPDK_TEST_NVME -eq 1 ]; then
run_test test/nvme/nvme.sh run_test suite test/nvme/nvme.sh
if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
run_test test/nvme/spdk_nvme_cli.sh run_test suite test/nvme/spdk_nvme_cli.sh
fi fi
# Only test hotplug without ASAN enabled. Since if it is # Only test hotplug without ASAN enabled. Since if it is
# enabled, it catches SEGV earlier than our handler which # enabled, it catches SEGV earlier than our handler which
# breaks the hotplug logic # breaks the hotplug logic
if [ $SPDK_RUN_ASAN -eq 0 ]; then if [ $SPDK_RUN_ASAN -eq 0 ]; then
run_test test/nvme/hotplug.sh intel run_test suite test/nvme/hotplug.sh intel
fi fi
fi fi
run_test test/env/env.sh run_test suite test/env/env.sh
if [ $SPDK_TEST_IOAT -eq 1 ]; then if [ $SPDK_TEST_IOAT -eq 1 ]; then
run_test test/ioat/ioat.sh run_test suite test/ioat/ioat.sh
fi fi
timing_exit lib timing_exit lib
if [ $SPDK_TEST_ISCSI -eq 1 ]; then if [ $SPDK_TEST_ISCSI -eq 1 ]; then
run_test ./test/iscsi_tgt/iscsi_tgt.sh posix run_test suite ./test/iscsi_tgt/iscsi_tgt.sh posix
run_test ./test/iscsi_tgt/iscsijson/json_config.sh run_test suite ./test/iscsi_tgt/iscsijson/json_config.sh
fi fi
if [ $SPDK_TEST_BLOBFS -eq 1 ]; then if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
run_test ./test/blobfs/rocksdb/rocksdb.sh run_test suite ./test/blobfs/rocksdb/rocksdb.sh
run_test ./test/blobstore/blobstore.sh run_test suite ./test/blobstore/blobstore.sh
fi fi
if [ $SPDK_TEST_NVMF -eq 1 ]; then if [ $SPDK_TEST_NVMF -eq 1 ]; then
run_test ./test/nvmf/nvmf.sh run_test suite ./test/nvmf/nvmf.sh
run_test ./test/nvmf/nvmfjson/json_config.sh run_test suite ./test/nvmf/nvmfjson/json_config.sh
fi fi
if [ $SPDK_TEST_VHOST -eq 1 ]; then if [ $SPDK_TEST_VHOST -eq 1 ]; then
timing_enter vhost timing_enter vhost
timing_enter negative timing_enter negative
run_test ./test/vhost/spdk_vhost.sh --negative run_test suite ./test/vhost/spdk_vhost.sh --negative
timing_exit negative timing_exit negative
timing_enter vhost_json_config timing_enter vhost_json_config
run_test ./test/vhost/json_config/json_config.sh run_test suite ./test/vhost/json_config/json_config.sh
timing_exit vhost_json_config timing_exit vhost_json_config
if [ $RUN_NIGHTLY -eq 1 ]; then if [ $RUN_NIGHTLY -eq 1 ]; then
timing_enter integrity_blk timing_enter integrity_blk
run_test ./test/vhost/spdk_vhost.sh --integrity-blk run_test suite ./test/vhost/spdk_vhost.sh --integrity-blk
timing_exit integrity_blk timing_exit integrity_blk
timing_enter integrity timing_enter integrity
run_test ./test/vhost/spdk_vhost.sh --integrity run_test suite ./test/vhost/spdk_vhost.sh --integrity
timing_exit integrity timing_exit integrity
timing_enter fs_integrity_scsi timing_enter fs_integrity_scsi
run_test ./test/vhost/spdk_vhost.sh --fs-integrity-scsi run_test suite ./test/vhost/spdk_vhost.sh --fs-integrity-scsi
timing_exit fs_integrity_scsi timing_exit fs_integrity_scsi
timing_enter fs_integrity_blk timing_enter fs_integrity_blk
run_test ./test/vhost/spdk_vhost.sh --fs-integrity-blk run_test suite ./test/vhost/spdk_vhost.sh --fs-integrity-blk
timing_exit fs_integrity_blk timing_exit fs_integrity_blk
timing_enter integrity_lvol_scsi_nightly timing_enter integrity_lvol_scsi_nightly
run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi-nightly run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi-nightly
timing_exit integrity_lvol_scsi_nightly timing_exit integrity_lvol_scsi_nightly
timing_enter integrity_lvol_blk_nightly timing_enter integrity_lvol_blk_nightly
run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-blk-nightly run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-blk-nightly
timing_exit integrity_lvol_blk_nightly timing_exit integrity_lvol_blk_nightly
timing_enter vhost_migration timing_enter vhost_migration
run_test ./test/vhost/spdk_vhost.sh --migration run_test suite ./test/vhost/spdk_vhost.sh --migration
timing_exit vhost_migration timing_exit vhost_migration
# timing_enter readonly # timing_enter readonly
# run_test ./test/vhost/spdk_vhost.sh --readonly # run_test suite ./test/vhost/spdk_vhost.sh --readonly
# timing_exit readonly # timing_exit readonly
fi fi
timing_enter integrity_lvol_scsi timing_enter integrity_lvol_scsi
run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-scsi
timing_exit integrity_lvol_scsi timing_exit integrity_lvol_scsi
timing_enter integrity_lvol_blk timing_enter integrity_lvol_blk
run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-blk run_test suite ./test/vhost/spdk_vhost.sh --integrity-lvol-blk
timing_exit integrity_lvol_blk timing_exit integrity_lvol_blk
timing_enter spdk_cli timing_enter spdk_cli
run_test ./test/spdkcli/vhost.sh run_test suite ./test/spdkcli/vhost.sh
timing_exit spdk_cli timing_exit spdk_cli
timing_exit vhost timing_exit vhost
@ -208,27 +208,27 @@ if [ $SPDK_TEST_LVOL -eq 1 ]; then
test_cases+="600,601,650,651,652,654,655," test_cases+="600,601,650,651,652,654,655,"
test_cases+="700,701,702,750,751,752,753,754,755,756,757,758,759," test_cases+="700,701,702,750,751,752,753,754,755,756,757,758,759,"
test_cases+="800,801,802,803,804,10000" test_cases+="800,801,802,803,804,10000"
run_test ./test/lvol/lvol.sh --test-cases=$test_cases run_test suite ./test/lvol/lvol.sh --test-cases=$test_cases
report_test_completion "lvol" report_test_completion "lvol"
timing_exit lvol timing_exit lvol
fi fi
if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
run_test ./test/vhost/initiator/blockdev.sh run_test suite ./test/vhost/initiator/blockdev.sh
run_test ./test/vhost/initiator/json_config.sh run_test suite ./test/vhost/initiator/json_config.sh
run_test ./test/spdkcli/virtio.sh run_test suite ./test/spdkcli/virtio.sh
report_test_completion "vhost_initiator" report_test_completion "vhost_initiator"
fi fi
if [ $SPDK_TEST_PMDK -eq 1 ]; then if [ $SPDK_TEST_PMDK -eq 1 ]; then
run_test ./test/pmem/pmem.sh -x run_test suite ./test/pmem/pmem.sh -x
run_test ./test/pmem/json_config/json_config.sh run_test suite ./test/pmem/json_config/json_config.sh
run_test ./test/spdkcli/pmem.sh run_test suite ./test/spdkcli/pmem.sh
fi fi
if [ $SPDK_TEST_RBD -eq 1 ]; then if [ $SPDK_TEST_RBD -eq 1 ]; then
run_test ./test/bdev/bdevjson/rbd_json_config.sh run_test suite ./test/bdev/bdevjson/rbd_json_config.sh
run_test ./test/spdkcli/rbd.sh run_test suite ./test/spdkcli/rbd.sh
fi fi
timing_enter cleanup timing_enter cleanup

View File

@ -424,14 +424,16 @@ function kill_stub() {
function run_test() { function run_test() {
set +x set +x
local test_type="$(echo $1 | tr 'a-z' 'A-Z')"
shift
echo "************************************" echo "************************************"
echo "START TEST $1" echo "START TEST $test_type $@"
echo "************************************" echo "************************************"
set -x set -x
time "$@" time "$@"
set +x set +x
echo "************************************" echo "************************************"
echo "END TEST $1" echo "END TEST $test_type $@"
echo "************************************" echo "************************************"
set -x set -x
} }

View File

@ -32,24 +32,24 @@ create_veth_interfaces $TEST_TYPE
start_stub "-s 2048 -i 0 -m $ISCSI_TEST_CORE_MASK" start_stub "-s 2048 -i 0 -m $ISCSI_TEST_CORE_MASK"
trap "kill_stub; cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT trap "kill_stub; cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
run_test ./test/iscsi_tgt/calsoft/calsoft.sh run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
run_test ./test/iscsi_tgt/filesystem/filesystem.sh run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
run_test ./test/iscsi_tgt/reset/reset.sh run_test suite ./test/iscsi_tgt/reset/reset.sh
run_test ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
run_test ./test/iscsi_tgt/lvol/iscsi_lvol.sh run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
run_test ./test/iscsi_tgt/fio/fio.sh run_test suite ./test/iscsi_tgt/fio/fio.sh
run_test ./test/iscsi_tgt/qos/qos.sh run_test suite ./test/iscsi_tgt/qos/qos.sh
if [ $RUN_NIGHTLY -eq 1 ]; then if [ $RUN_NIGHTLY -eq 1 ]; then
if [ $SPDK_TEST_PMDK -eq 1 ]; then if [ $SPDK_TEST_PMDK -eq 1 ]; then
run_test ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10 run_test suite ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
fi fi
run_test ./test/iscsi_tgt/ip_migration/ip_migration.sh run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
run_test ./test/iscsi_tgt/ext4test/ext4test.sh run_test suite ./test/iscsi_tgt/ext4test/ext4test.sh
run_test ./test/iscsi_tgt/digests/digests.sh run_test suite ./test/iscsi_tgt/digests/digests.sh
fi fi
if [ $SPDK_TEST_RBD -eq 1 ]; then if [ $SPDK_TEST_RBD -eq 1 ]; then
run_test ./test/iscsi_tgt/rbd/rbd.sh run_test suite ./test/iscsi_tgt/rbd/rbd.sh
fi fi
trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT
@ -59,15 +59,15 @@ if [ $SPDK_TEST_NVMF -eq 1 ]; then
# TODO: enable remote NVMe controllers with multi-process so that # TODO: enable remote NVMe controllers with multi-process so that
# we can use the stub for this test # we can use the stub for this test
# Test configure remote NVMe device from rpc and conf file # Test configure remote NVMe device from rpc and conf file
run_test ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
fi fi
if [ $RUN_NIGHTLY -eq 1 ]; then if [ $RUN_NIGHTLY -eq 1 ]; then
run_test ./test/iscsi_tgt/multiconnection/multiconnection.sh run_test suite ./test/iscsi_tgt/multiconnection/multiconnection.sh
fi fi
if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
run_test ./test/iscsi_tgt/initiator/initiator.sh run_test suite ./test/iscsi_tgt/initiator/initiator.sh
fi fi
cleanup_veth_interfaces $TEST_TYPE cleanup_veth_interfaces $TEST_TYPE

View File

@ -3,13 +3,6 @@ import sys
from test_cases import * from test_cases import *
def check_fail_count(fail_count, num_test):
if not fail_count:
print("Test: {num_test} - PASS".format(num_test=num_test))
else:
print("Test: {num_test} - FAIL".format(num_test=num_test))
if __name__ == "__main__": if __name__ == "__main__":
rpc_py = None rpc_py = None
total_size = None total_size = None
@ -37,7 +30,6 @@ if __name__ == "__main__":
fail_count = 0 fail_count = 0
exec("fail_count += tc.test_case{num_test}" exec("fail_count += tc.test_case{num_test}"
"()".format(num_test=num_test)) "()".format(num_test=num_test))
check_fail_count(fail_count, num_test)
if fail_count: if fail_count:
tc_failed.append(num_test) tc_failed.append(num_test)

View File

@ -149,13 +149,16 @@ def case_message(func):
10000: 'SIGTERM', 10000: 'SIGTERM',
} }
num = int(func.__name__.strip('test_case')[:]) num = int(func.__name__.strip('test_case')[:])
print("========================================================") print("************************************")
print("Test Case {num}: Start".format(num=num)) print("START TEST CASE {name}".format(name=test_name[num]))
print("Test Name: {name}".format(name=test_name[num])) print("************************************")
print("========================================================")
fail_count = func(*args, **kwargs) fail_count = func(*args, **kwargs)
print("Test Case {num}: END\n".format(num=num)) print("************************************")
print("========================================================") if not fail_count:
print("END TEST CASE {name} PASS".format(name=test_name[num]))
else:
print("END TEST CASE {name} FAIL".format(name=test_name[num]))
print("************************************")
return fail_count return fail_count
return inner return inner

View File

@ -23,27 +23,27 @@ trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i 0" export NVMF_APP="./app/nvmf_tgt/nvmf_tgt -i 0"
run_test test/nvmf/filesystem/filesystem.sh run_test suite test/nvmf/filesystem/filesystem.sh
run_test test/nvmf/discovery/discovery.sh run_test suite test/nvmf/discovery/discovery.sh
if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
run_test test/nvmf/nvme_cli/nvme_cli.sh run_test suite test/nvmf/nvme_cli/nvme_cli.sh
fi fi
run_test test/nvmf/lvol/nvmf_lvol.sh run_test suite test/nvmf/lvol/nvmf_lvol.sh
run_test test/nvmf/shutdown/shutdown.sh run_test suite test/nvmf/shutdown/shutdown.sh
if [ $RUN_NIGHTLY -eq 1 ]; then if [ $RUN_NIGHTLY -eq 1 ]; then
run_test test/nvmf/multiconnection/multiconnection.sh run_test suite test/nvmf/multiconnection/multiconnection.sh
fi fi
timing_enter host timing_enter host
run_test test/nvmf/host/bdevperf.sh run_test suite test/nvmf/host/bdevperf.sh
run_test test/nvmf/host/identify.sh run_test suite test/nvmf/host/identify.sh
run_test test/nvmf/host/perf.sh run_test suite test/nvmf/host/perf.sh
# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT) # TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
#run_test test/nvmf/host/identify_kernel_nvmf.sh #run_test test/nvmf/host/identify_kernel_nvmf.sh
run_test test/nvmf/host/aer.sh run_test suite test/nvmf/host/aer.sh
run_test test/nvmf/host/fio.sh run_test suite test/nvmf/host/fio.sh
timing_exit host timing_exit host
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
@ -51,8 +51,8 @@ kill_stub
# TODO: enable nvme device detachment for multi-process so that # TODO: enable nvme device detachment for multi-process so that
# we can use the stub for this test # we can use the stub for this test
run_test test/nvmf/rpc/rpc.sh run_test suite test/nvmf/rpc/rpc.sh
run_test test/nvmf/fio/fio.sh run_test suite test/nvmf/fio/fio.sh
revert_soft_roce revert_soft_roce
report_test_completion "nvmf" report_test_completion "nvmf"

View File

@ -63,12 +63,12 @@ WORKDIR=$(readlink -f $(dirname $0))
case $1 in case $1 in
-n|--negative) -n|--negative)
echo 'Negative tests suite...' echo 'Negative tests suite...'
$WORKDIR/other/negative.sh run_test case $WORKDIR/other/negative.sh
report_test_completion "vhost_negative" report_test_completion "vhost_negative"
;; ;;
-p|--performance) -p|--performance)
echo 'Running performance suite...' echo 'Running performance suite...'
$WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0 \ --vm=0,$VM_IMAGE,Nvme0n1p0 \
--test-type=spdk_vhost_scsi \ --test-type=spdk_vhost_scsi \
--fio-job=$WORKDIR/common/fio_jobs/default_performance.job --fio-job=$WORKDIR/common/fio_jobs/default_performance.job
@ -76,7 +76,7 @@ case $1 in
;; ;;
-pb|--performance-blk) -pb|--performance-blk)
echo 'Running blk performance suite...' echo 'Running blk performance suite...'
$WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/fiotest/autotest.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0 \ --vm=0,$VM_IMAGE,Nvme0n1p0 \
--test-type=spdk_vhost_blk \ --test-type=spdk_vhost_blk \
--fio-job=$WORKDIR/common/fio_jobs/default_performance.job --fio-job=$WORKDIR/common/fio_jobs/default_performance.job
@ -84,12 +84,12 @@ case $1 in
;; ;;
-m|--migration) -m|--migration)
echo 'Running migration suite...' echo 'Running migration suite...'
$WORKDIR/migration/migration.sh -x \ run_test case $WORKDIR/migration/migration.sh -x \
--fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1,2 --fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1,2
;; ;;
-i|--integrity) -i|--integrity)
echo 'Running SCSI integrity suite...' echo 'Running SCSI integrity suite...'
$WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \ run_test case $WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_scsi \ --test-type=spdk_vhost_scsi \
--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@ -97,7 +97,7 @@ case $1 in
;; ;;
-ib|--integrity-blk) -ib|--integrity-blk)
echo 'Running blk integrity suite...' echo 'Running blk integrity suite...'
$WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \ run_test case $WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_blk \ --test-type=spdk_vhost_blk \
--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@ -105,67 +105,67 @@ case $1 in
;; ;;
-fs|--fs-integrity-scsi) -fs|--fs-integrity-scsi)
echo 'Running filesystem integrity suite with SCSI...' echo 'Running filesystem integrity suite with SCSI...'
$WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4" run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
report_test_completion "vhost_fs_integrity_scsi" report_test_completion "vhost_fs_integrity_scsi"
;; ;;
-fb|--fs-integrity-blk) -fb|--fs-integrity-blk)
echo 'Running filesystem integrity suite with BLK...' echo 'Running filesystem integrity suite with BLK...'
$WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4" run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
report_test_completion "vhost_fs_integrity_blk" report_test_completion "vhost_fs_integrity_blk"
;; ;;
-ils|--integrity-lvol-scsi) -ils|--integrity-lvol-scsi)
echo 'Running lvol integrity suite...' echo 'Running lvol integrity suite...'
$WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_scsi --thin-provisioning --ctrl-type=spdk_vhost_scsi --thin-provisioning
report_test_completion "vhost_integrity_lvol_scsi" report_test_completion "vhost_integrity_lvol_scsi"
;; ;;
-ilb|--integrity-lvol-blk) -ilb|--integrity-lvol-blk)
echo 'Running lvol integrity suite...' echo 'Running lvol integrity suite...'
$WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_blk --ctrl-type=spdk_vhost_blk
report_test_completion "vhost_integrity_lvol_blk" report_test_completion "vhost_integrity_lvol_blk"
;; ;;
-ilsn|--integrity-lvol-scsi-nightly) -ilsn|--integrity-lvol-scsi-nightly)
if [[ $DISKS_NUMBER -ge 2 ]]; then if [[ $DISKS_NUMBER -ge 2 ]]; then
echo 'Running lvol integrity nightly suite with two cores and two controllers' echo 'Running lvol integrity nightly suite with two cores and two controllers'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2 --ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
echo 'Running lvol integrity nightly suite with one core and two controllers' echo 'Running lvol integrity nightly suite with one core and two controllers'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2 --ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
fi fi
if [[ -e $CENTOS_VM_IMAGE ]]; then if [[ -e $CENTOS_VM_IMAGE ]]; then
echo 'Running lvol integrity nightly suite with different os types' echo 'Running lvol integrity nightly suite with different os types'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
--ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os --ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
fi fi
echo 'Running lvol integrity nightly suite with one core and one controller' echo 'Running lvol integrity nightly suite with one core and one controller'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_scsi --max-disks=1 --ctrl-type=spdk_vhost_scsi --max-disks=1
;; ;;
-ilbn|--integrity-lvol-blk-nightly) -ilbn|--integrity-lvol-blk-nightly)
if [[ $DISKS_NUMBER -ge 2 ]]; then if [[ $DISKS_NUMBER -ge 2 ]]; then
echo 'Running lvol integrity nightly suite with two cores and two controllers' echo 'Running lvol integrity nightly suite with two cores and two controllers'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2 --ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
echo 'Running lvol integrity nightly suite with one core and two controllers' echo 'Running lvol integrity nightly suite with one core and two controllers'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2 --ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
fi fi
if [[ -e $CENTOS_VM_IMAGE ]]; then if [[ -e $CENTOS_VM_IMAGE ]]; then
echo 'Running lvol integrity nightly suite with different os types' echo 'Running lvol integrity nightly suite with different os types'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
--ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os --ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
fi fi
echo 'Running lvol integrity nightly suite with one core and one controller' echo 'Running lvol integrity nightly suite with one core and one controller'
$WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
--ctrl-type=spdk_vhost_blk --max-disks=1 --ctrl-type=spdk_vhost_blk --max-disks=1
;; ;;
-hp|--hotplug) -hp|--hotplug)
echo 'Running hotplug tests suite...' echo 'Running hotplug tests suite...'
$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
--vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \ --vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
@ -176,7 +176,7 @@ case $1 in
;; ;;
-shr|--scsi-hot-remove) -shr|--scsi-hot-remove)
echo 'Running scsi hotremove tests suite...' echo 'Running scsi hotremove tests suite...'
$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_scsi \ --test-type=spdk_vhost_scsi \
@ -185,7 +185,7 @@ case $1 in
;; ;;
-bhr|--blk-hot-remove) -bhr|--blk-hot-remove)
echo 'Running blk hotremove tests suite...' echo 'Running blk hotremove tests suite...'
$WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \ run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \ --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \ --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
--test-type=spdk_vhost_blk \ --test-type=spdk_vhost_blk \
@ -194,7 +194,7 @@ case $1 in
;; ;;
-ro|--readonly) -ro|--readonly)
echo 'Running readonly tests suite...' echo 'Running readonly tests suite...'
$WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x run_test case $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
report_test_completion "vhost_readonly" report_test_completion "vhost_readonly"
;; ;;
*) *)