#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh
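# Geometry of the malloc bdev that will back the test namespace (size in MiB, block size in bytes).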
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py"
set -e
# Pass the parameter 'iso' to this script when running it in isolation to trigger RDMA device initialization.
# e.g. sudo ./connect_disconnect.sh iso
nvmftestinit $1
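# Pick the first RDMA-capable IP on this machine; skip the test gracefully if none is available.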
RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
	echo "no NIC for nvmf test"
	exit 0
fi
# The connect/disconnect test is geared toward ensuring that we properly free resources after disconnecting qpairs.
timing_enter connect_disconnect
# Start up the NVMf target in another process
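# -m 0xF pins the target to cores 0-3.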
$NVMF_APP -m 0xF &
nvmfpid=$!
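# On interrupt or unexpected exit: collect the target's shared-memory debug info, kill the target, and tear down the test environment.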
trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
waitforlisten $nvmfpid
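# Create the RDMA transport: 8192-byte I/O unit size (-u), up to 4 qpairs per controller (-p), in-capsule data disabled (-c 0).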
$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -c 0
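# Load the kernel NVMe-oF RDMA initiator driver so that 'nvme connect' below can reach the target.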
modprobe -v nvme-rdma
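# Create the malloc bdev that will be exposed as the subsystem's namespace.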
bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
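# Nightly runs exercise far more connect/disconnect cycles than per-patch runs.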
if [ $RUN_NIGHTLY -eq 1 ]; then
	num_iterations=200
else
	num_iterations=10
fi
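# Suppress command tracing so the loop doesn't flood the log. Each iteration connects, waits for the namespace's block device to appear, disconnects, and waits for it to disappear.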
set +x
for i in $(seq 1 $num_iterations); do
	nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "4420"
	waitforblk "nvme0n1"
	nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
	waitforblk_disconnect "nvme0n1"
done
set -x
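# Normal teardown: clear the trap, clean up the initiator side, stop the target, and finish the test environment.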
trap - SIGINT SIGTERM EXIT
nvmfcleanup
killprocess $nvmfpid
nvmftestfini $1
timing_exit connect_disconnect