spdk/test/nvmf/target/rpc.sh
Evgeniy Kochetov 3cbb9690d4 test/nvmf: Add test for nvmf_get_stats RPC method
Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Change-Id: I1f9d2f75831a93dded21d06de63d9f6f40b1a013
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/463390
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
2019-07-29 18:05:09 +00:00

#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh
rpc_py="$rootdir/scripts/rpc.py"
function jcount()
{
	local filter=$1
	jq "$filter" | wc -l
}

function jsum()
{
	local filter=$1
	jq "$filter" | awk '{s+=$1}END{print s}'
}
timing_enter rpc
nvmftestinit
nvmfappstart "-m 0xF"
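# Core mask 0xF starts four cores, and the nvmf target creates one poll group per
# core, which is what the poll-group count below is checked against.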
stats=$($rpc_py nvmf_get_stats)
# Expect 4 poll groups (from CPU mask) and no transports yet
[ "4" -eq $(jcount .poll_groups[].name <<< "$stats") ]
[ "null" == $(jq .poll_groups[0].transports[0] <<< "$stats") ]
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
stats=$($rpc_py nvmf_get_stats)
# Expect no QPs
[ "0" -eq $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
[ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
# Transport statistics are currently implemented for RDMA only
if [ 'rdma' == $TEST_TRANSPORT ]; then
	# Expect RDMA transport and some devices
	[ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ]
	transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats")
	[ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ]
	[ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ]
fi
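# "${transport_type,,}" is bash lowercase expansion; comparing the lowercased values
# makes the trtype check case-insensitive (the stats typically report "RDMA" while
# $TEST_TRANSPORT is "rdma").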
# Set the number of iterations for the subsystem create/delete loops below
if [ $RUN_NIGHTLY -eq 1 ]; then
	times=50
else
	times=3
fi
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
$rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
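# Malloc1 is the RAM-backed bdev (arguments are size in MB and block size in bytes)
# that backs the namespaces attached to the subsystems below.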
# Disallow host NQN and make sure connect fails
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
$rpc_py nvmf_subsystem_allow_any_host -d nqn.2016-06.io.spdk:cnode1
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
# This connect should fail - the host NQN is not allowed
! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
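# The leading '!' inverts the exit status, so this line succeeds only when the
# connect attempt is rejected as expected.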
# Add the host NQN and verify that the connect succeeds
$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforblk "nvme0n1"
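# waitforblk (from the sourced test helpers) waits until the nvme0n1 block device
# shows up, so the namespace is actually visible to the host before disconnecting.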
nvme disconnect -n nqn.2016-06.io.spdk:cnode1
# Remove the host and verify that the connect fails
$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode1 nqn.2016-06.io.spdk:host1
! nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
# Allow any host and verify that the connect succeeds
$rpc_py nvmf_subsystem_allow_any_host -e nqn.2016-06.io.spdk:cnode1
nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -q nqn.2016-06.io.spdk:host1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforblk "nvme0n1"
nvme disconnect -n nqn.2016-06.io.spdk:cnode1
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
# Repeatedly create and delete subsystems, attaching the namespace at an explicit
# (non-default) nsid and connecting a host each time.
for i in $(seq 1 $times)
do
	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
	$rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
	nvme connect -t $TEST_TRANSPORT -n nqn.2016-06.io.spdk:cnode1 -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
	waitforblk "nvme0n1"
	nvme disconnect -n nqn.2016-06.io.spdk:cnode1
	$rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 5
	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
done
# Repeatedly create and delete subsystems without connecting a host.
for i in $(seq 1 $times)
do
	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -s SPDK00000000000001
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
	$rpc_py nvmf_subsystem_allow_any_host nqn.2016-06.io.spdk:cnode1
	$rpc_py nvmf_subsystem_remove_ns nqn.2016-06.io.spdk:cnode1 1
	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
done
stats=$($rpc_py nvmf_get_stats)
# Expect some admin and IO qpairs
[ "0" -lt $(jsum .poll_groups[].admin_qpairs <<< "$stats") ]
[ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
# Transport statistics are currently implemented for RDMA only
if [ 'rdma' == $TEST_TRANSPORT ]; then
	# Expect non-zero completion counts and accumulated request latency
	[ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ]
	[ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ]
fi
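# Illustrative: with two poll groups each reporting one device with completions 10
# and 20, jsum '.poll_groups[].transports[].devices[].completions' prints 30, so a
# strictly positive sum means I/O actually flowed through the RDMA devices.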
trap - SIGINT SIGTERM EXIT
nvmftestfini
timing_exit rpc