test: add a test_name param to run_test

This will allow us to call timing_enter and timing_exit directly
inside the run_test function. run_test invocations already nest the
same way our timing calls do, so that function is a natural home for
them.

This patch series aims to combine the timing_*, run_test, and
report_test_completion calls into a single place. That will greatly
reduce the number of lines in our bash scripts devoted to tracking
timing, formatting, and test completion, and it will let us expand on
test completion reporting. Further down the line, it will also allow
us to unify test case documentation.
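
As a rough sketch, the end state this series is driving toward looks
something like the function below. The body is an illustration under
assumptions (only the test_name handling is part of this patch); the
final form lands later in the series:

    function run_test() {
        local test_type
        test_type="$(echo $1 | tr '[:lower:]' '[:upper:]')"
        shift
        local test_name="$1"
        shift

        timing_enter "$test_name"
        echo "************************************"
        echo "START TEST $test_type $*"
        echo "************************************"
        # assumption: run_test executes its remaining arguments as the
        # test command, then owns timing and completion reporting
        "$@"
        timing_exit "$test_name"
        report_test_completion "$test_name"
    }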

Change-Id: I8e1f4bcea86b2c3b88cc6e42339c57dfce4d58f2
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/476799
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Seth Howell, 2019-12-04 12:39:14 -07:00 (committed by Tomasz Zawadzki)
parent e6ee199098
commit 3710048472
10 changed files with 117 additions and 116 deletions


@@ -145,7 +145,7 @@ fi
 if [ $SPDK_TEST_UNITTEST -eq 1 ]; then
     timing_enter unittest
-    run_test suite ./test/unit/unittest.sh
+    run_test suite "unittest" ./test/unit/unittest.sh
     report_test_completion "unittest"
     timing_exit unittest
 fi
@@ -154,121 +154,120 @@ fi
 if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
     timing_enter lib
-    run_test suite test/env/env.sh
+    run_test suite "env" test/env/env.sh
-    run_test suite test/rpc_client/rpc_client.sh
+    run_test suite "rpc_client" test/rpc_client/rpc_client.sh
-    run_test suite ./test/json_config/json_config.sh
+    run_test suite "json_config" ./test/json_config/json_config.sh
-    run_test suite test/json_config/alias_rpc/alias_rpc.sh
+    run_test suite "alias_rpc" test/json_config/alias_rpc/alias_rpc.sh
-    run_test suite test/spdkcli/tcp.sh
+    run_test suite "spdkcli_tcp" test/spdkcli/tcp.sh
     if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
-        run_test suite test/bdev/blockdev.sh
+        run_test suite "blockdev" test/bdev/blockdev.sh
-        run_test suite test/bdev/bdev_raid.sh
+        run_test suite "bdev_raid" test/bdev/bdev_raid.sh
     fi
     if [ $SPDK_TEST_JSON -eq 1 ]; then
-        run_test suite test/config_converter/test_converter.sh
+        run_test suite "test_converter" test/config_converter/test_converter.sh
     fi
     if [ $SPDK_TEST_EVENT -eq 1 ]; then
-        run_test suite test/event/event.sh
+        run_test suite "event" test/event/event.sh
     fi
     if [ $SPDK_TEST_NVME -eq 1 ]; then
-        run_test suite test/nvme/nvme.sh
+        run_test suite "nvme" test/nvme/nvme.sh
         if [[ $SPDK_TEST_NVME_CLI -eq 1 ]]; then
-            run_test suite test/nvme/spdk_nvme_cli.sh
+            run_test suite "nvme_cli" test/nvme/spdk_nvme_cli.sh
         fi
         if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
-            run_test suite test/nvme/spdk_nvme_cli_cuse.sh
+            run_test suite "nvme_cli_cuse" test/nvme/spdk_nvme_cli_cuse.sh
         fi
         # Only test hotplug without ASAN enabled. Since if it is
         # enabled, it catches SEGV earlier than our handler which
         # breaks the hotplug logic.
         if [ $SPDK_RUN_ASAN -eq 0 ]; then
-            run_test suite test/nvme/hotplug.sh intel
+            run_test suite "nvme_hotplug" test/nvme/hotplug.sh intel
         fi
     fi
     if [ $SPDK_TEST_IOAT -eq 1 ]; then
-        run_test suite test/ioat/ioat.sh
+        run_test suite "ioat" test/ioat/ioat.sh
     fi
     timing_exit lib
     if [ $SPDK_TEST_ISCSI -eq 1 ]; then
-        run_test suite ./test/iscsi_tgt/iscsi_tgt.sh posix
+        run_test suite "iscsi_tgt_posix" ./test/iscsi_tgt/iscsi_tgt.sh posix
-        run_test suite ./test/spdkcli/iscsi.sh
+        run_test suite "spdkcli_iscsi" ./test/spdkcli/iscsi.sh
         # Run raid spdkcli test under iSCSI since blockdev tests run on systems that can't run spdkcli yet
-        run_test suite test/spdkcli/raid.sh
+        run_test suite "spdkcli_raid" test/spdkcli/raid.sh
     fi
     if [ $SPDK_TEST_VPP -eq 1 ]; then
-        run_test suite ./test/iscsi_tgt/iscsi_tgt.sh vpp
+        run_test suite "iscsi_tgt_vpp" ./test/iscsi_tgt/iscsi_tgt.sh vpp
     fi
     if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
-        run_test suite ./test/blobfs/rocksdb/rocksdb.sh
+        run_test suite "rocksdb" ./test/blobfs/rocksdb/rocksdb.sh
-        run_test suite ./test/blobstore/blobstore.sh
+        run_test suite "blobstore" ./test/blobstore/blobstore.sh
-        run_test suite ./test/blobfs/blobfs.sh
+        run_test suite "blobfs" ./test/blobfs/blobfs.sh
     fi
     if [ $SPDK_TEST_NVMF -eq 1 ]; then
-        run_test suite ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
+        run_test suite "nvmf" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
-        run_test suite ./test/spdkcli/nvmf.sh
+        run_test suite "spdkcli_nvmf" ./test/spdkcli/nvmf.sh
     fi
     if [ $SPDK_TEST_VHOST -eq 1 ]; then
-        run_test suite ./test/vhost/vhost.sh
-        report_test_completion "vhost"
+        run_test suite "vhost" ./test/vhost/vhost.sh
     fi
     if [ $SPDK_TEST_LVOL -eq 1 ]; then
         timing_enter lvol
-        run_test suite ./test/lvol/lvol.sh --test-cases=all
+        run_test suite "lvol" ./test/lvol/lvol.sh --test-cases=all
-        run_test suite ./test/lvol/lvol2.sh
+        run_test suite "lvol2" ./test/lvol/lvol2.sh
-        run_test suite ./test/blobstore/blob_io_wait/blob_io_wait.sh
+        run_test suite "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh
         report_test_completion "lvol"
         timing_exit lvol
     fi
     if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
         timing_enter vhost_initiator
-        run_test suite ./test/vhost/initiator/blockdev.sh
+        run_test suite "vhost_blockdev" ./test/vhost/initiator/blockdev.sh
-        run_test suite ./test/spdkcli/virtio.sh
+        run_test suite "spdkcli_virtio" ./test/spdkcli/virtio.sh
-        run_test suite ./test/vhost/shared/shared.sh
+        run_test suite "vhost_shared" ./test/vhost/shared/shared.sh
-        run_test suite ./test/vhost/fuzz/fuzz.sh
+        run_test suite "vhost_fuzz" ./test/vhost/fuzz/fuzz.sh
-        report_test_completion "vhost_initiator"
+        report_test_completion "vhost initiator"
         timing_exit vhost_initiator
     fi
     if [ $SPDK_TEST_PMDK -eq 1 ]; then
-        run_test suite ./test/pmem/pmem.sh -x
+        run_test suite "pmem" ./test/pmem/pmem.sh -x
-        run_test suite ./test/spdkcli/pmem.sh
+        run_test suite "spdkcli_pmem" ./test/spdkcli/pmem.sh
     fi
     if [ $SPDK_TEST_RBD -eq 1 ]; then
-        run_test suite ./test/spdkcli/rbd.sh
+        run_test suite "spdkcli_rbd" ./test/spdkcli/rbd.sh
     fi
     if [ $SPDK_TEST_OCF -eq 1 ]; then
-        run_test suite ./test/ocf/ocf.sh
+        run_test suite "ocf" ./test/ocf/ocf.sh
     fi
     if [ $SPDK_TEST_FTL -eq 1 ]; then
-        run_test suite ./test/ftl/ftl.sh
+        run_test suite "ftl" ./test/ftl/ftl.sh
     fi
     if [ $SPDK_TEST_VMD -eq 1 ]; then
-        run_test suite ./test/vmd/vmd.sh
+        run_test suite "vmd" ./test/vmd/vmd.sh
     fi
     if [ $SPDK_TEST_REDUCE -eq 1 ]; then
-        run_test suite ./test/compress/compress.sh
+        run_test suite "compress" ./test/compress/compress.sh
     fi
     if [ $SPDK_TEST_OPAL -eq 1 ]; then
-        run_test suite ./test/nvme/nvme_opal.sh
+        run_test suite "nvme_opal" ./test/nvme/nvme_opal.sh
     fi
 fi


@@ -567,6 +567,8 @@ function run_test() {
     local test_type
     test_type="$(echo $1 | tr '[:lower:]' '[:upper:]')"
     shift
+    local test_name="$1"
+    shift
     echo "************************************"
     echo "START TEST $test_type $*"
     echo "************************************"


@@ -48,30 +48,30 @@ fi
 timing_enter ftl
 timing_enter bdevperf
-run_test suite $testdir/bdevperf.sh $device
+run_test suite "ftl_bdevperf" $testdir/bdevperf.sh $device
 timing_exit bdevperf
 timing_enter restore
-run_test suite $testdir/restore.sh $device
+run_test suite "ftl_restore" $testdir/restore.sh $device
 if [ -n "$nv_cache" ]; then
-    run_test suite $testdir/restore.sh -c $nv_cache $device
+    run_test suite "ftl_restore_nv_cache" $testdir/restore.sh -c $nv_cache $device
 fi
 timing_exit restore
 if [ -n "$nv_cache" ]; then
     timing_enter dirty_shutdown
-    run_test suite $testdir/dirty_shutdown.sh -c $nv_cache $device
+    run_test suite "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
     timing_exit dirty_shutdown
 fi
 timing_enter json
-run_test suite $testdir/json.sh $device
+run_test suite "ftl_json" $testdir/json.sh $device
 timing_exit json
 if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
     timing_enter fio_basic
-    run_test suite $testdir/fio.sh $device basic
+    run_test suite "ftl_fio_basic" $testdir/fio.sh $device basic
     timing_exit fio_basic
     $rootdir/app/spdk_tgt/spdk_tgt &
@@ -86,7 +86,7 @@ if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
     trap - SIGINT SIGTERM EXIT
     timing_enter fio_extended
-    run_test suite $testdir/fio.sh $device extended $uuid
+    run_test suite "ftl_fio_extended" $testdir/fio.sh $device extended $uuid
     timing_exit fio_extended
 fi


@@ -24,39 +24,39 @@ create_veth_interfaces $TEST_TYPE
 trap 'cleanup_veth_interfaces $TEST_TYPE; exit 1' SIGINT SIGTERM EXIT
-run_test suite ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
+run_test suite "iscsi_tgt_sock" ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
 if [ "$TEST_TYPE" == "posix" ]; then
     # calsoft doesn't handle TCP stream properly and fails decoding iSCSI
     # requests when are divided by TCP segmentation. This is very common
     # situation for VPP and causes that calsoft.sh never PASS.
-    run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
+    run_test suite "iscsi_tgt_calsoft" ./test/iscsi_tgt/calsoft/calsoft.sh
 fi
-run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
+run_test suite "iscsi_tgt_filesystem" ./test/iscsi_tgt/filesystem/filesystem.sh
-run_test suite ./test/iscsi_tgt/reset/reset.sh
+run_test suite "iscsi_tgt_reset" ./test/iscsi_tgt/reset/reset.sh
-run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
+run_test suite "iscsi_tgt_rpc_config" ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
-run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
+run_test suite "iscsi_tgt_iscsi_lvol" ./test/iscsi_tgt/lvol/iscsi_lvol.sh
-run_test suite ./test/iscsi_tgt/fio/fio.sh
+run_test suite "iscsi_tgt_fio" ./test/iscsi_tgt/fio/fio.sh
-run_test suite ./test/iscsi_tgt/qos/qos.sh
+run_test suite "iscsi_tgt_qos" ./test/iscsi_tgt/qos/qos.sh
 # IP Migration tests do not support network namespaces,
 # they can only be run on posix sockets.
 if [ "$TEST_TYPE" == "posix" ]; then
-    run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
+    run_test suite "iscsi_tgt_ip_migration" ./test/iscsi_tgt/ip_migration/ip_migration.sh
 fi
-run_test suite ./test/iscsi_tgt/trace_record/trace_record.sh
+run_test suite "iscsi_tgt_trace_record" ./test/iscsi_tgt/trace_record/trace_record.sh
 if [ $RUN_NIGHTLY -eq 1 ]; then
     if [ $SPDK_TEST_PMDK -eq 1 ]; then
-        run_test suite ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
+        run_test suite "iscsi_tgt_pmem" ./test/iscsi_tgt/pmem/iscsi_pmem.sh 4096 10
     fi
-    run_test suite ./test/iscsi_tgt/ext4test/ext4test.sh
+    run_test suite "iscsi_tgt_ext4test" ./test/iscsi_tgt/ext4test/ext4test.sh
-    run_test suite ./test/iscsi_tgt/digests/digests.sh
+    run_test suite "iscsi_tgt_digests" ./test/iscsi_tgt/digests/digests.sh
 fi
 if [ $SPDK_TEST_RBD -eq 1 ]; then
     # RBD tests do not support network namespaces,
     # they can only be run on posix sockets.
     if [ "$TEST_TYPE" == "posix" ]; then
-        run_test suite ./test/iscsi_tgt/rbd/rbd.sh
+        run_test suite "iscsi_tgt_rbd" ./test/iscsi_tgt/rbd/rbd.sh
     fi
 fi
@@ -67,17 +67,17 @@ if [ $SPDK_TEST_NVMF -eq 1 ]; then
     # they can only be run on posix sockets.
     if [ "$TEST_TYPE" == "posix" ]; then
         # Test configure remote NVMe device from rpc and conf file
-        run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
+        run_test suite "iscsi_tgt_fio_remote_nvme" ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
     fi
 fi
 if [ $RUN_NIGHTLY -eq 1 ]; then
-    run_test suite ./test/iscsi_tgt/multiconnection/multiconnection.sh
+    run_test suite "iscsi_tgt_multiconnection" ./test/iscsi_tgt/multiconnection/multiconnection.sh
 fi
 if [ $SPDK_TEST_ISCSI_INITIATOR -eq 1 ]; then
-    run_test suite ./test/iscsi_tgt/initiator/initiator.sh
+    run_test suite "iscsi_tgt_initiator" ./test/iscsi_tgt/initiator/initiator.sh
-    run_test suite ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
+    run_test suite "iscsi_tgt_bdev_io_wait" ./test/iscsi_tgt/bdev_io_wait/bdev_io_wait.sh
 fi
 cleanup_veth_interfaces $TEST_TYPE


@@ -11,7 +11,7 @@ function rpc_cmd() {
 }
 function run_lvol_test() {
-    run_test suite "$@"
+    run_test suite "$*" "$@"
     leftover_bdevs=$(rpc_cmd bdev_get_bdevs)
     [ "$(jq length <<< "$leftover_bdevs")" == "0" ]


@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 timing_enter lvol
 timing_enter basic
-run_test suite test/lvol/basic.sh
+run_test suite "lvol_basic" test/lvol/basic.sh
 timing_exit basic
 timing_exit lvol


@@ -15,43 +15,43 @@ trap "exit 1" SIGINT SIGTERM EXIT
 TEST_ARGS=( "$@" )
-run_test suite test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
 if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
-    run_test suite test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
+    run_test suite "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
 fi
-run_test suite test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_lvol" test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
 #TODO: disabled due to intermittent failures. Need to triage.
-# run_test suite test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
+# run_test suite "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
-run_test suite test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_vhost" test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_bdev_io_wait" test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_create_transport" test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_multitarget" test/nvmf/target/multitarget.sh "${TEST_ARGS[@]}"
 if [ $RUN_NIGHTLY -eq 1 ]; then
-    run_test suite test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
+    run_test suite "nvmf_fuzz" test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
-    run_test suite test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
+    run_test suite "nvmf_multiconnection" test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
-    run_test suite test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
+    run_test suite "nvmf_initiator_timeout" test/nvmf/target/initiator_timeout.sh "${TEST_ARGS[@]}"
 fi
-run_test suite test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_rpc" test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_fio" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
 timing_enter host
-run_test suite test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
 # TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
 #run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
-run_test suite test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_aer" test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_fio" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
-run_test suite test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
+run_test suite "nvmf_target_disconnect" test/nvmf/host/target_disconnect.sh "${TEST_ARGS[@]}"
 timing_exit host


@@ -8,7 +8,7 @@ source $rootdir/test/common/autotest_common.sh
 function suite()
 {
     timing_enter $(basename "$@")
-    run_test suite "$@"
+    run_test suite "ocf_$(basename "$@")" "$@"
     timing_exit $(basename "$@")
 }
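
With this wrapper the test name is derived from the script path, so a
call such as the following (hypothetical path) would be reported as
"ocf_fio-modes.sh":

    suite "$testdir/integrity/fio-modes.sh"

Note that basename "$@" only behaves as intended when suite receives a
single argument; a second argument would be treated by basename as a
suffix to strip.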


@@ -53,7 +53,7 @@ WORKDIR=$(readlink -f $(dirname $0))
 case $1 in
     -hp|--hotplug)
         echo 'Running hotplug tests suite...'
-        run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_hotplug" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
             --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
             --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
             --vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
@@ -64,7 +64,7 @@ case $1 in
         ;;
     -shr|--scsi-hot-remove)
         echo 'Running scsi hotremove tests suite...'
-        run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_scsi_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
             --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
             --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
             --test-type=spdk_vhost_scsi \
@@ -73,7 +73,7 @@ case $1 in
         ;;
     -bhr|--blk-hot-remove)
         echo 'Running blk hotremove tests suite...'
-        run_test case $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_blk_hot_remove" $WORKDIR/hotplug/scsi_hotplug.sh --fio-bin=$FIO_BIN \
             --vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1 \
             --vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
             --test-type=spdk_vhost_blk \


@@ -31,19 +31,19 @@ WORKDIR=$(readlink -f $(dirname $0))
 timing_enter vhost
 timing_enter negative
-run_test case $WORKDIR/other/negative.sh
+run_test case "vhost_negative" $WORKDIR/other/negative.sh
 report_test_completion "vhost_negative"
 timing_exit negative
 timing_enter vhost_boot
-run_test suite $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
+run_test suite "vhost_boot" $WORKDIR/vhost_boot/vhost_boot.sh --vm_image=$VM_IMAGE
 report_test_completion "vhost_boot"
 timing_exit vhost_boot
 if [ $RUN_NIGHTLY -eq 1 ]; then
     timing_enter integrity_blk
     echo 'Running blk integrity suite...'
-    run_test case $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+    run_test case "vhost_blk_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
         --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
         --test-type=spdk_vhost_blk \
        --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@@ -52,7 +52,7 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
     timing_enter integrity
     echo 'Running SCSI integrity suite...'
-    run_test case $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
+    run_test case "vhost_scsi_integrity" $WORKDIR/fiotest/fio.sh -x --fio-bin=$FIO_BIN \
         --vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
         --test-type=spdk_vhost_scsi \
         --fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
@@ -61,85 +61,85 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
     timing_enter fs_integrity_scsi
     echo 'Running filesystem integrity suite with SCSI...'
-    run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
+    run_test case "vhost_scsi_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_scsi --fs="xfs ntfs btrfs ext4"
     report_test_completion "vhost_fs_integrity_scsi"
     timing_exit fs_integrity_scsi
     timing_enter fs_integrity_blk
     echo 'Running filesystem integrity suite with BLK...'
-    run_test case $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
+    run_test case "vhost_blk_fs_integrity" $WORKDIR/integrity/integrity_start.sh --ctrl-type=spdk_vhost_blk --fs="xfs ntfs btrfs ext4"
     report_test_completion "vhost_fs_integrity_blk"
     timing_exit fs_integrity_blk
     timing_enter integrity_lvol_scsi_nightly
     if [[ $DISKS_NUMBER -ge 2 ]]; then
         echo 'Running lvol integrity nightly suite with two cores and two controllers'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_scsi_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
             --ctrl-type=spdk_vhost_scsi --max-disks=2 --distribute-cores --vm-count=2
         echo 'Running lvol integrity nightly suite with one core and two controllers'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_scsi_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
            --ctrl-type=spdk_vhost_scsi --max-disks=2 --vm-count=2
     fi
     if [[ -e $CENTOS_VM_IMAGE ]]; then
         echo 'Running lvol integrity nightly suite with different os types'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+        run_test case "vhost_scsi_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
             --ctrl-type=spdk_vhost_scsi --vm-count=2 --multi-os
     fi
     echo 'Running lvol integrity nightly suite with one core and one controller'
-    run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+    run_test case "vhost_scsi_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
        --ctrl-type=spdk_vhost_scsi --max-disks=1
     timing_exit integrity_lvol_scsi_nightly
     timing_enter integrity_lvol_blk_nightly
     if [[ $DISKS_NUMBER -ge 2 ]]; then
         echo 'Running lvol integrity nightly suite with two cores and two controllers'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_blk_2core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
            --ctrl-type=spdk_vhost_blk --max-disks=2 --distribute-cores --vm-count=2
         echo 'Running lvol integrity nightly suite with one core and two controllers'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+        run_test case "vhost_blk_1core_2ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
            --ctrl-type=spdk_vhost_blk --max-disks=2 --vm-count=2
     fi
     if [[ -e $CENTOS_VM_IMAGE ]]; then
         echo 'Running lvol integrity nightly suite with different os types'
-        run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
+        run_test case "vhost_blk_nightly" $WORKDIR/lvol/lvol_test.sh --fio-bin=$CENTOS_FIO_BIN \
            --ctrl-type=spdk_vhost_blk --vm-count=2 --multi-os
     fi
     echo 'Running lvol integrity nightly suite with one core and one controller'
-    run_test case $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
+    run_test case "vhost_lvol_integrity_1core_1ctrl" $WORKDIR/lvol/lvol_test.sh --fio-bin=$FIO_BIN \
        --ctrl-type=spdk_vhost_blk --max-disks=1
     timing_exit integrity_lvol_blk_nightly
     timing_enter readonly
     echo 'Running readonly tests suite...'
-    run_test case $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
+    run_test case "vhost_readonly" $WORKDIR/readonly/readonly.sh --vm_image=$VM_IMAGE --disk=Nvme0n1 -x
     report_test_completion "vhost_readonly"
     timing_exit readonly
     timing_enter vhost_migration
     echo 'Running migration suite...'
-    run_test case $WORKDIR/migration/migration.sh -x \
+    run_test case "vhost_migration" $WORKDIR/migration/migration.sh -x \
        --fio-bin=$FIO_BIN --os=$VM_IMAGE --test-cases=1,2
     timing_exit vhost_migration
 fi
 timing_enter integrity_lvol_scsi
 echo 'Running lvol integrity suite...'
-run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+run_test case "vhost_scsi_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
    --ctrl-type=spdk_vhost_scsi --thin-provisioning
 report_test_completion "vhost_integrity_lvol_scsi"
 timing_exit integrity_lvol_scsi
 timing_enter integrity_lvol_blk
 echo 'Running lvol integrity suite...'
-run_test case $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
+run_test case "vhost_blk_lvol_integrity" $WORKDIR/lvol/lvol_test.sh -x --fio-bin=$FIO_BIN \
    --ctrl-type=spdk_vhost_blk
 report_test_completion "vhost_integrity_lvol_blk"
 timing_exit integrity_lvol_blk
 timing_enter spdk_cli
-run_test suite ./test/spdkcli/vhost.sh
+run_test suite "spdkcli_vhost" ./test/spdkcli/vhost.sh
 timing_exit spdk_cli
 timing_exit vhost