test/nvmf: Remove support for soft-RoCE setups

Motivation: https://github.com/spdk/spdk/issues/2277

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I6a85816c65ebecf63c2f454e4b97484542faef9e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10929
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Michal Berger 2021-12-30 12:37:17 +01:00 committed by Jim Harris
parent 9c37603b4a
commit cbda5664f5
7 changed files with 12 additions and 56 deletions

View File

@@ -41,7 +41,7 @@ if [ $(uname -s) = Linux ]; then
fi
fi
trap "autotest_cleanup || :; revert_soft_roce; exit 1" SIGINT SIGTERM EXIT
trap "autotest_cleanup || :; exit 1" SIGINT SIGTERM EXIT
timing_enter autotest
@@ -240,6 +240,7 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
fi
if [ $SPDK_TEST_NVMF -eq 1 ]; then
export NET_TYPE
# The NVMe-oF run test cases are split out like this so that the parser that compiles the
# list of all tests can properly differentiate them. Please do not merge them into one line.
if [ "$SPDK_TEST_NVMF_TRANSPORT" = "rdma" ]; then
@@ -330,7 +331,6 @@ fi
timing_enter cleanup
autotest_cleanup
revert_soft_roce
timing_exit cleanup
timing_exit autotest

View File

@@ -55,11 +55,6 @@ function load_ib_rdma_modules() {
modprobe rdma_ucm
}
function detect_soft_roce_nics() {
rxe_cfg stop # make sure we run tests with a clean slate
rxe_cfg start
}
function allocate_nic_ips() {
((count = NVMF_IP_LEAST_ADDR))
for nic_name in $(get_rdma_if_list); do
@@ -86,9 +81,7 @@ function get_rdma_if_list() {
mapfile -t rxe_net_devs < <(rxe_cfg rxe-net)
if ((${#net_devs[@]} == 0)); then
# No rdma-capable nics on board, using soft-RoCE
printf '%s\n' "${rxe_net_devs[@]}"
return 0
return 1
fi
# Pick only these devices which were found during gather_supported_nvmf_pci_devs() run
@@ -388,12 +381,13 @@ prepare_net_devs() {
fi
# NET_TYPE == virt or phy-fallback
if [[ $TEST_TRANSPORT == rdma ]]; then
detect_soft_roce_nics
elif [[ $TEST_TRANSPORT == tcp ]]; then
if [[ $TEST_TRANSPORT == tcp ]]; then
nvmf_veth_init
return 0
fi
echo "ERROR: virt and fallback setup is not supported for $TEST_TRANSPORT"
return 1
}
function nvmftestinit() {
@@ -459,17 +453,6 @@ function rdma_device_init() {
allocate_nic_ips
}
function revert_soft_roce() {
rxe_cfg stop
}
function check_ip_is_soft_roce() {
if [ "$TEST_TRANSPORT" != "rdma" ]; then
return 0
fi
rxe_cfg status rxe | grep -wq "$1"
}
function nvme_connect() {
local init_count
init_count=$(nvme list | wc -l)

View File

@@ -14,12 +14,6 @@ bdevperf_rpc_sock=/var/tmp/bdevperf.sock
nvmftestinit
# This issue brings up a weird error in soft roce where the RDMA WC doesn't point to the correct qpair.
if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP && [ "$TEST_TRANSPORT" == "rdma" ]; then
echo "Using software RDMA, not running this test due to a known issue."
exit 0
fi
nvmfappstart -m 0xF
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

View File

@@ -53,10 +53,7 @@ fi
run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
run_test "nvmf_fio_target" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
if ! check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
# Soft-RoCE will return invalid values in the WC field after a qp has been
# destroyed which lead to NULL pointer references not seen in real hardware.
if [[ $NET_TYPE == phy ]]; then
run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
#TODO: disabled due to intermittent failures. Need to triage.
# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
@@ -76,9 +73,7 @@ run_test "nvmf_discovery" test/nvmf/host/discovery.sh "${TEST_ARGS[@]}"
#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
run_test "nvmf_fio_host" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"
# There is an intermittent error relating to those tests and Soft-RoCE.
# Skip those tests if we are using rxe.
if ! check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
if [[ $NET_TYPE == phy ]]; then
# GitHub issue #1165
run_test "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
# GitHub issue #1043
@@ -88,4 +83,3 @@ fi
timing_exit host
trap - SIGINT SIGTERM EXIT
revert_soft_roce

View File

@@ -15,14 +15,6 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit
nvmfappstart -m 0xF
# SoftRoce does not have enough queues available for
# multiconnection tests. Detect if we're using software RDMA.
# If so - lower the number of subsystems for test.
if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
echo "Using software RDMA, lowering number of NVMeOF subsystems."
NVMF_SUBSYS=1
fi
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
for i in $(seq 1 $NVMF_SUBSYS); do

View File

@@ -11,18 +11,14 @@ MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py"
function starttarget() {
nvmftestinit
# Start the target
nvmfappstart -m 0x1E
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
num_subsystems=({1..10})
# SoftRoce does not have enough queues available for
# this test. Detect if we're using software RDMA.
# If so, only use two subsystem.
if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
num_subsystems=({1..2})
fi
timing_enter create_subsystems
# Create subsystems
@@ -146,8 +142,6 @@ function nvmf_shutdown_tc3() {
stoptarget
}
nvmftestinit
run_test "nvmf_shutdown_tc1" nvmf_shutdown_tc1
run_test "nvmf_shutdown_tc2" nvmf_shutdown_tc2
run_test "nvmf_shutdown_tc3" nvmf_shutdown_tc3

View File

@@ -9,7 +9,7 @@ source $rootdir/test/nvmf/common.sh
MATCH_FILE="spdkcli_nvmf.test"
SPDKCLI_BRANCH="/nvmf"
trap 'on_error_exit; revert_soft_roce' ERR
trap 'on_error_exit' ERR
timing_enter run_nvmf_tgt
run_nvmf_tgt
@@ -84,4 +84,3 @@ $spdkcli_job "'/nvmf/subsystem/nqn.2014-08.org.spdk:cnode1/namespaces delete nsi
timing_exit spdkcli_clear_nvmf_config
killprocess $nvmf_tgt_pid
#revert_soft_roce