#!/usr/bin/env bash
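
# Shutdown robustness test: kill the initiator with no I/O outstanding, kill it
# again with I/O outstanding, and finally kill the target itself with I/O
# outstanding (see the three test cases below).
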
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

rpc_py="$rootdir/scripts/rpc.py"
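
# waitforio <rpc socket> <bdev name>
# Poll the bdev's I/O statistics over the given RPC socket until at least 100
# read I/Os have completed, retrying up to 10 times; returns non-zero on timeout.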
function waitforio() {
	# $1 = RPC socket
	if [ -z "$1" ]; then
		exit 1
	fi
	# $2 = bdev name
	if [ -z "$2" ]; then
		exit 1
	fi
	local ret=1
	local i
	for (( i = 10; i != 0; i-- )); do
		read_io_count=$($rpc_py -s $1 get_bdevs_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
		# A few I/O will happen during initial examine. So wait until at least 100 I/O
		# have completed to know that bdevperf is really generating the I/O.
		if [ $read_io_count -ge 100 ]; then
			ret=0
			break
		fi
		sleep 0.25
	done
	return $ret
}

timing_enter shutdown
nvmftestinit
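# Start the nvmf target application on four cores (core mask 0xF).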
nvmfappstart "-m 0xF"
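
# Create the transport before any subsystems are added (-u 8192 sets the I/O
# unit size to 8 KiB).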
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

num_subsystems=10
# Soft-RoCE does not have enough queues available for this test. Detect whether
# we are using software RDMA; if so, only use two subsystems.
if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
	num_subsystems=2
fi
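
# Build a bdevperf config with one NVMe TransportID entry per subsystem; the
# entries are appended inside the loop below.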
touch $testdir/bdevperf.conf
echo "[Nvme]" > $testdir/bdevperf.conf

timing_enter create_subsystems
# Create subsystems
rm -rf $testdir/rpcs.txt
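# Queue all of the per-subsystem RPCs in rpcs.txt so they can be issued with a
# single rpc.py invocation after the loop.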
for i in $(seq 1 $num_subsystems)
do
	echo construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i >> $testdir/rpcs.txt
	echo nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i >> $testdir/rpcs.txt
	echo nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i >> $testdir/rpcs.txt
	echo nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT >> $testdir/rpcs.txt

	echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode$i traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT hostaddr:$NVMF_FIRST_TARGET_IP\" Nvme$i" >> $testdir/bdevperf.conf
done
$rpc_py < $testdir/rpcs.txt
timing_exit create_subsystems

# Test 1: Kill the initiator unexpectedly with no I/O outstanding

timing_enter test1
# Run bdev_svc, which connects but does not issue I/O
$rootdir/test/app/bdev_svc/bdev_svc -i 1 -r /var/tmp/bdevperf.sock -c $testdir/bdevperf.conf &
perfpid=$!
waitforlisten $perfpid /var/tmp/bdevperf.sock
$rpc_py -s /var/tmp/bdevperf.sock wait_subsystem_init

# Kill bdev_svc
kill -9 $perfpid
rm -f /var/run/spdk_bdev1

# Verify the target stays up
sleep 1
kill -0 $nvmfpid

# Connect with bdevperf and confirm it works
$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock -c $testdir/bdevperf.conf -q 64 -o 65536 -w verify -t 1
timing_exit test1

# Test 2: Kill initiator unexpectedly with I/O outstanding

timing_enter test2
# Run bdevperf
$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock -c $testdir/bdevperf.conf -q 64 -o 65536 -w verify -t 10 &
perfpid=$!
waitforlisten $perfpid /var/tmp/bdevperf.sock
$rpc_py -s /var/tmp/bdevperf.sock wait_subsystem_init
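
# Wait until bdevperf is actively generating I/O before killing it mid-run.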
waitforio /var/tmp/bdevperf.sock Nvme1n1

# Kill bdevperf halfway through the run
killprocess $perfpid

# Verify the target stays up
sleep 1
kill -0 $nvmfpid
timing_exit test2

# Test 3: Kill the target unexpectedly with I/O outstanding

timing_enter test3
# Run bdevperf
$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock -c $testdir/bdevperf.conf -q 64 -o 65536 -w verify -t 10 &
perfpid=$!
waitforlisten $perfpid /var/tmp/bdevperf.sock
$rpc_py -s /var/tmp/bdevperf.sock wait_subsystem_init

# Expand the trap to clean up bdevperf if something goes wrong
trap "process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid; nvmftestfini; exit 1" SIGINT SIGTERM EXIT
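
# Again wait for bdevperf to start generating I/O before killing the target.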
waitforio /var/tmp/bdevperf.sock Nvme1n1

# Kill the target halfway through the run
killprocess $nvmfpid

# Verify bdevperf exits successfully
sleep 1
# TODO: Right now the NVMe-oF initiator will not correctly detect broken connections
# and so it will never shut down. Just kill it.
kill -9 $perfpid
timing_exit test3
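
# Clean up the files created by the test and clear the trap.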
|
2018-11-07 18:00:28 +00:00
|
|
|
|
|
|
|
rm -f ./local-job0-0-verify.state
|
|
|
|
rm -rf $testdir/bdevperf.conf
|
2019-05-01 16:03:09 +00:00
|
|
|
rm -rf $testdir/rpcs.txt
|
2018-11-07 18:00:28 +00:00
|
|
|
trap - SIGINT SIGTERM EXIT

timing_enter testfini
nvmftestfini
timing_exit testfini
timing_exit shutdown