There's a bug in Linux 5.1 (and possibly 5.0) where the kernel initiator driver crashes if it cannot get one queue per CPU. This will be fixed eventually, but for now we need to remove the cases where we restrict the number of queues per controller so that we can test on newer kernels.

Even in cases where we're testing the SPDK initiator, there's no real need to restrict the number of queue pairs. The kernel will eventually get fixed, but we should be testing with the default behavior anyway (the kernel wants lots of queues).

We'll also want to add regression tests to make sure the kernel doesn't break again, but that will all come later.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I9979e6d94456e075688b822b042936b63e518a4a
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454819
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
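For context, the kind of per-controller queue restriction being removed looked roughly like the hypothetical before/after below (illustrative only; the exact flags and call sites are not shown in this file, and '-i' here is nvme-cli's --nr-io-queues option):

    # before: cap the kernel initiator at 4 I/O queues
    # nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -i 4
    # after: take the driver default of one I/O queue per CPU
    # nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT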
#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

rpc_py="$rootdir/scripts/rpc.py"

set -e

timing_enter rpc

# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
# e.g. sudo ./rpc.sh iso
nvmftestinit
nvmfappstart "-m 0xF"

$rpc_py nvmf_create_transport -t rdma -u 8192
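# (Above: the target app runs with core mask 0xF, i.e. cores 0-3. The RDMA
# transport is created without any queue-pair limits, so the transport defaults
# apply; per the commit message, the kernel initiator expects one I/O queue
# per CPU.)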

# set times for subsystem construct/delete
if [ $RUN_NIGHTLY -eq 1 ]; then
	times=50
else
	times=3
fi

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
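# (Malloc1 is a 64 MiB RAM-backed bdev with a 512-byte block size; it is reused
# as the namespace for every subsystem created below.)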

# Disallow host NQN and make sure connect fails
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
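# Optional sanity check (sketch only; assumes the get_nvmf_subsystems RPC name
# used by this SPDK release): dump the subsystem state to confirm allow_any_host
# is disabled and the hosts list is empty before the negative connect below.
#   $rpc_py get_nvmf_subsystems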

# This connect should fail - the host NQN is not allowed
! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
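# (The leading '!' above inverts the exit status, so the expected connect
# failure does not trip 'set -e'.)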

# Add the host NQN and verify that the connect succeeds
$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforblk "nvme0n1"
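# (waitforblk, one of the sourced test helpers, polls until the namespace shows
# up as a block device on the host.)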
nvme disconnect -n nqn.2016-06.io.spdk:cnode1

# Remove the host and verify that the connect fails
$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
! nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

# Allow any host and verify that the connect succeeds
$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1
nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforblk "nvme0n1"
nvme disconnect -n nqn.2016-06.io.spdk:cnode1

$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1

# Repeatedly add and delete the subsystem, attaching the namespace with a
# non-default nsid each time, and connect from the kernel initiator.
for i in $(seq 1 $times); do
	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
	$rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
	nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

	waitforblk "nvme0n1"
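	# Optional extra check (sketch only, not part of the original test): the
	# namespace was attached with -n 5, so the host-side namespace ID could be
	# verified, e.g.:
	#   nvme list-ns /dev/nvme0 | grep -q 0x5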

	nvme disconnect -n nqn.2016-06.io.spdk:cnode1

	$rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 5
	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1

done

nvmfcleanup
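# (nvmfcleanup tears down the kernel NVMe-oF initiator state; the loop below
# only exercises the target-side RPCs, so no host connects are made.)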

# Repeatedly create and delete the subsystem via RPC only (no host connect).
for i in $(seq 1 $times); do
	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
	$rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1

	$rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1

	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
done

trap - SIGINT SIGTERM EXIT
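# (Resetting the traps above keeps any error handler from firing on the normal
# exit path below.)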

nvmftestfini
timing_exit rpc