Phenomenon: Test case: Using the following command to test ./test/nvmf/target/shutdown.sh --iso --transport=tcp without this patch, it will cause coredump. The error is that the NVMe/TCP request in data buffer waiting list has "FREE" state. We do not need to call this function in spdk_nvmf_tcp_qpair_flush_pdus_internal; it causes the bug during the shutdown test since it will call the function recursively, and it does not work for the shutdown path. There are two possible recursive calls: (1) spdk_nvmf_tcp_qpair_flush_pdus_internal -> spdk_nvmf_tcp_qpair_process_pending -> spdk_nvmf_tcp_qpair_flush_pdus_internal -> ... (2) spdk_nvmf_tcp_qpair_flush_pdus_internal -> pdu completion (pdu->cb) -> ... -> spdk_nvmf_tcp_qpair_flush_pdus_internal. And we need to move the processing for NVMe/TCP requests which are waiting for a buffer into another function in order to avoid the complicated possible recursive function calls. (Previously, we found a similar issue in spdk_nvmf_tcp_qpair_flush_pdus_internal for pdu sending handling.) But we cannot remove this feature, otherwise the initiator will hang waiting for the I/O. So we add the same functionality in the spdk_nvmf_tcp_poll_group_poll function. Purpose: To fix the NVMe/TCP shutdown issue. This patch also re-enables the tests for shutdown and bdevio. Change-Id: Ifa193faa3f685429dcba7557df5b311bd566e297 Signed-off-by: Ziye Yang <ziye.yang@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/462658 Reviewed-by: Seth Howell <seth.howell@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
60 lines
1.8 KiB
Bash
Executable File
#!/usr/bin/env bash
# Top-level NVMe-oF (nvmf) test orchestrator: runs the target-side and
# host-side sub-test scripts via run_test, forwarding this script's own
# arguments (e.g. --iso --transport=tcp) to each one. NVMe CLI and
# nightly-only tests are gated by the SPDK_TEST_NVME_CLI / RUN_NIGHTLY
# environment variables set by the CI configuration.

testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../..")
source "$rootdir/test/common/autotest_common.sh"

# These tests exercise Linux-specific NVMe-oF target functionality;
# silently skip on any other OS.
if [ "$(uname -s)" != "Linux" ]; then
	exit 0
fi

source "$rootdir/test/nvmf/common.sh"

timing_enter nvmf_tgt

# Any interrupt/termination (or fall-through exit while the trap is armed)
# marks the run as failed; the trap is cleared after the last sub-test.
trap "exit 1" SIGINT SIGTERM EXIT

# Keep the arguments as an array so values containing spaces survive
# being forwarded to each sub-test.
TEST_ARGS=("$@")

run_test suite test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
if [ "$SPDK_TEST_NVME_CLI" -eq 1 ]; then
	run_test suite test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
fi
run_test suite test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
#TODO: disabled due to intermittent failures. Need to triage.
# run_test suite test/nvmf/target/srq_overwhelm.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/nvmf_vhost.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/create_transport.sh "${TEST_ARGS[@]}"

if [ "$RUN_NIGHTLY" -eq 1 ]; then
	run_test suite test/nvmf/target/fuzz.sh "${TEST_ARGS[@]}"
	run_test suite test/nvmf/target/multiconnection.sh "${TEST_ARGS[@]}"
fi

run_test suite test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/rpc.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"

timing_enter host

run_test suite test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/host/perf.sh "${TEST_ARGS[@]}"

# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
#run_test test/nvmf/host/identify_kernel_nvmf.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/host/aer.sh "${TEST_ARGS[@]}"
run_test suite test/nvmf/host/fio.sh "${TEST_ARGS[@]}"

timing_exit host

# All sub-tests finished: disarm the failure trap before cleanup.
trap - SIGINT SIGTERM EXIT
revert_soft_roce

report_test_completion "nvmf"
timing_exit nvmf_tgt