RPC is a default feature required for almost all usages, so enable RPC
by default, but with a UNIX domain socket for security reasons. -r can
now be used from the command line to specify an RPC listen address other
than the default /var/tmp/spdk.sock.

Remove the Enable parameter from the Rpc config section, but still allow
specifying an alternative listen address using the Listen parameter as
an alternative to the command line option. This keeps backward
compatibility for this release for anyone still using the configuration
file.

Remove the Rpc sections from all configuration files that were using
them, except for those that specified alternate TCP ports for
multi-process test cases. These can be fixed later to use an alternate
UNIX domain socket and the command line instead.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Ife0d03fcab638c67b659f1eb85348ddc2b55c4c4
Reviewed-on: https://review.gerrithub.io/386561
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
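For illustration, a minimal sketch of the two ways to select the RPC listen
address after this change; the nvmf_tgt binary and the alternate socket path
below are placeholder examples, not part of this commit:

    # Command line: -r overrides the default /var/tmp/spdk.sock
    app/nvmf_tgt/nvmf_tgt -r /var/tmp/spdk_alt.sock

    # Configuration file: Listen is still honored for backward
    # compatibility (the Enable parameter is removed)
    [Rpc]
      Listen /var/tmp/spdk_alt.sock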
#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/scripts/autotest_common.sh
source $rootdir/test/nvmf/common.sh

RUNTIME=$1          # test runtime, passed through to fio
PMEM_BDEVS=""       # filled in as pmem bdevs are constructed
SUBSYS_NR=1         # number of NVMe-oF subsystems to create
PMEM_PER_SUBSYS=8   # pmem bdevs attached to each subsystem
rpc_py="python $rootdir/scripts/rpc.py"

function disconnect_nvmf()
{
    for i in $(seq 1 $SUBSYS_NR); do
        nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
    done
}

function clear_pmem_pool()
{
    for pmem in $PMEM_BDEVS; do
        $rpc_py delete_bdev $pmem
    done

    for i in $(seq 1 $SUBSYS_NR); do
        for c in $(seq 1 $PMEM_PER_SUBSYS); do
            $rpc_py delete_pmem_pool /tmp/pool_file${i}_${c}
        done
    done
}

set -e

timing_enter nvmf_pmem

RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
    echo "no NIC for nvmf test"
    exit 0
fi

timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -c $testdir/../nvmf.conf &
pid=$!

# On interruption or failure, disconnect the initiator, remove the pmem
# pool files, and kill the target before exiting
trap "disconnect_nvmf; rm -f /tmp/pool_file*; killprocess $pid; exit 1" SIGINT SIGTERM EXIT

waitforlisten $pid
timing_exit start_nvmf_tgt

# Load the kernel NVMe-oF RDMA initiator driver
modprobe -v nvme-rdma

timing_enter setup
# Create pmem backends on each subsystem
for i in $(seq 1 $SUBSYS_NR); do
    bdevs=""
    for c in $(seq 1 $PMEM_PER_SUBSYS); do
        # Create a 32 MB pmem pool with a 512-byte block size, then
        # construct a pmem bdev on top of it
        $rpc_py create_pmem_pool /tmp/pool_file${i}_${c} 32 512
        bdevs+="$($rpc_py construct_pmem_bdev /tmp/pool_file${i}_${c}) "
    done
    $rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -a -s SPDK$i -n "$bdevs"
    PMEM_BDEVS+=$bdevs
done
timing_exit setup

timing_enter nvmf_connect
for i in $(seq 1 $SUBSYS_NR); do
    nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
done
timing_exit nvmf_connect

timing_enter fio_test
# Run fio against the connected namespaces: 128 KiB I/O size, queue depth
# 64, random writes for $RUNTIME, with data verification
$testdir/../fio/nvmf_fio.py 131072 64 randwrite $RUNTIME verify
timing_exit fio_test

sync
disconnect_nvmf

for i in $(seq 1 $SUBSYS_NR); do
    $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
done

clear_pmem_pool

rm -f ./local-job*

# Test completed cleanly; clear the failure trap
trap - SIGINT SIGTERM EXIT

nvmfcleanup
killprocess $pid
timing_exit nvmf_pmem