Spdk/test/nvmf/nvmf.sh

61 lines
1.9 KiB
Bash
Raw Normal View History

#!/usr/bin/env bash
# Top-level driver for the SPDK NVMe-oF (nvmf) test suites. Any arguments
# given to this script are forwarded verbatim to every test script it runs.

testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../..")
source "$rootdir/test/common/autotest_common.sh"

# The NVMe-oF target is Linux-only; on other platforms skip quietly.
if [[ "$(uname -s)" != "Linux" ]]; then
	exit 0
fi

source "$rootdir/test/nvmf/common.sh"

timing_enter nvmf_tgt

# Make sure an interrupted run is reported as a failure.
trap "exit 1" SIGINT SIGTERM EXIT

# NOTE: kept as a scalar and expanded unquoted below on purpose, so each
# original argument is passed to the test scripts as a separate word.
# (Arguments containing whitespace are not supported by this scheme.)
TEST_ARGS=$@

run_test suite test/nvmf/target/filesystem.sh $TEST_ARGS
run_test suite test/nvmf/target/discovery.sh $TEST_ARGS
run_test suite test/nvmf/target/connect_disconnect.sh $TEST_ARGS

# Default to 0 so the test still works when the flag is not exported;
# the original bare "[ $SPDK_TEST_NVME_CLI -eq 1 ]" errored when unset.
if [[ "${SPDK_TEST_NVME_CLI:-0}" -eq 1 ]]; then
	run_test suite test/nvmf/target/nvme_cli.sh $TEST_ARGS
fi

run_test suite test/nvmf/target/nvmf_lvol.sh $TEST_ARGS
# TODO: disabled due to intermittent failures. Need to triage.
# run_test suite test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
run_test suite test/nvmf/target/nvmf_vhost.sh $TEST_ARGS
run_test suite test/nvmf/target/bdev_io_wait.sh $TEST_ARGS
run_test suite test/nvmf/target/create_transport.sh $TEST_ARGS

# Longer-running suites are restricted to the nightly CI runs.
if [[ "${RUN_NIGHTLY:-0}" -eq 1 ]]; then
	run_test suite test/nvmf/target/fuzz.sh $TEST_ARGS
	run_test suite test/nvmf/target/multiconnection.sh $TEST_ARGS
	run_test suite test/nvmf/target/initiator_timeout.sh $TEST_ARGS
fi

run_test suite test/nvmf/target/nmic.sh $TEST_ARGS
run_test suite test/nvmf/target/rpc.sh $TEST_ARGS
run_test suite test/nvmf/target/fio.sh $TEST_ARGS
nvmf/tcp: Remove spdk_nvmf_tcp_qpair_process_pending Phenomenon: Test case: Using the following command to test ./test/nvmf/target/shutdown.sh --iso --transport=tcp without this patch, it will cause coredump. The error is that the NVMe/TCP request in data buffer waiting list has "FREE" state. We do not need to call this function in spdk_nvmf_tcp_qpair_flush_pdus_internal, it causes the bug during shutdown test since it will call the function recursively, and it does not work for the shutdown path. There are two possible recursive calls: (1)spdk_nvmf_tcp_qpair_flush_pdus_internal -> spdk_nvmf_tcp_qpair_process_pending -> spdk_nvmf_tcp_qpair_flush_pdus_internal -> >.. (2) spdk_nvmf_tcp_qpair_flush_pdus_internal-> pdu completion (pdu->cb) ->.. -> spdk_nvmf_tcp_qpair_flush_pdus_internal. And we need to move the processing for NVMe/TCP requests which are waiting for buffers into another function to handle, in order to avoid the complicated possible recursive function calls. (Previously, we found the similar issue in spdk_nvmf_tcp_qpair_flush_pdus_internal for pdu sending handling) But we cannot remove this feature, otherwise, the initiator will hang waiting for the I/O. So we add the same functionality in spdk_nvmf_tcp_poll_group_poll function. Purpose: To fix the NVMe/TCP shutdown issue. And this patch also re-enables the test for shutdown and bdevio. Change-Id: Ifa193faa3f685429dcba7557df5b311bd566e297 Signed-off-by: Ziye Yang <ziye.yang@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/462658 Reviewed-by: Seth Howell <seth.howell@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2019-07-20 13:06:23 +00:00
# Remaining target-side suites.
for target_suite in shutdown bdevio; do
	run_test suite "test/nvmf/target/${target_suite}.sh" $TEST_ARGS
done

timing_enter host

# Host-side (initiator) suites.
for host_suite in bdevperf identify perf; do
	run_test suite "test/nvmf/host/${host_suite}.sh" $TEST_ARGS
done
# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
run_test suite test/nvmf/host/aer.sh $TEST_ARGS
test/asan: preload ASAN for fio ASAN needs to be LD_PRELOADed before SPDK fio_plugin in order to analyze its code. Just adding that will report any issues in fio binary as well as the fio_plugin. To prevent known fio leaks from affecting the results, a suppression list for LeakSanitizer (used in conjunction with ASAN) is added. At this time the suppression list contains known leaks for fio 3.3. The list might need adjustments as fio version is updated. Side note. Even though it is possible to specify a directory to ignore ("leak:/usr/src/fio/"), which in theory should suppress any leaks in fio, it has the side effect of hiding SPDK leaks as well, since the fio_plugin leaks are seen as coming from /usr/src/fio/ioengines.c. See below for examples of each suppressed error: Direct leak of 42 byte(s) in 4 object(s) allocated from: #0 0x7f9d52f3e320 in strdup (/lib64/libasan.so.5+0x3b320) #1 0x41f267 in get_new_job /usr/src/fio/init.c:490 Direct leak of 914936 byte(s) in 10397 object(s) allocated from: #0 0x7f74422e8ea6 in __interceptor_calloc (/lib64/libasan.so.5+0x10dea6) #1 0x46402e in log_io_piece /usr/src/fio/iolog.c:214 Direct leak of 608 byte(s) in 19 object(s) allocated from: #0 0x7f74422e8ca8 in __interceptor_malloc (/lib64/libasan.so.5+0x10dca8) #1 0x44c4e1 in add_to_dump_list /usr/src/fio/parse.c:1039 #2 0x44c4e1 in parse_option /usr/src/fio/parse.c:1098 Direct leak of 173 byte(s) in 20 object(s) allocated from: #0 0x7f744227153d in strdup (/lib64/libasan.so.5+0x9653d) #1 0x44b50d in __handle_option /usr/src/fio/parse.c:718 Indirect leak of 111925528 byte(s) in 1271881 object(s) allocated from: #0 0x7f74422e8ea6 in __interceptor_calloc (/lib64/libasan.so.5+0x10dea6) #1 0x46402e in log_io_piece /usr/src/fio/iolog.c:214 Indirect leak of 171 byte(s) in 19 object(s) allocated from: #0 0x7f744227153d in strdup (/lib64/libasan.so.5+0x9653d) #1 0x44c4ed in add_to_dump_list /usr/src/fio/parse.c:1040 #2 0x44c4ed in parse_option /usr/src/fio/parse.c:1098 Indirect leak of 167 byte(s)
in 19 object(s) allocated from: #0 0x7f744227153d in strdup (/lib64/libasan.so.5+0x9653d) #1 0x44c502 in add_to_dump_list /usr/src/fio/parse.c:1042 #2 0x44c502 in parse_option /usr/src/fio/parse.c:1098 Change-Id: I9b5811993508421be50b12af160645c77ea93d7e Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/456315 Reviewed-by: Ben Walker <benjamin.walker@intel.com> Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2019-05-30 07:29:06 +00:00
# Last host-side suite, then tear down the whole nvmf test run.
run_test suite test/nvmf/host/fio.sh $TEST_ARGS
timing_exit host
# Clear the "exit 1" handler installed earlier so a normal exit is clean.
trap - SIGINT SIGTERM EXIT
# Undo the Soft-RoCE setup performed by test/nvmf/common.sh (sourced above).
revert_soft_roce
report_test_completion "nvmf"
timing_exit nvmf_tgt