From e6c4c8a15c3bf7333ddd943b4aa665dae35448b0 Mon Sep 17 00:00:00 2001 From: lgalkax Date: Wed, 20 Sep 2017 08:30:53 +0200 Subject: [PATCH] test/nvmf: Add test scripts with FIO traffic and NVML backend Add test script to use SPDK Nvmf with NVML backends and run FIO read/write traffic with verify flag enabled. Change-Id: Iff8a85f65c36cb7372963076252577b7a1b2378f Signed-off-by: lgalkax Signed-off-by: Karol Latecki Reviewed-on: https://review.gerrithub.io/379247 Tested-by: SPDK Automated Test System Reviewed-by: Daniel Verkamp --- test/nvmf/nvmf.sh | 8 ++++ test/nvmf/pmem/nvmf.conf | 8 ++++ test/nvmf/pmem/nvmf_pmem.sh | 95 +++++++++++++++++++++++++++++++++++++ test/nvmf/test_plan.md | 17 +++++++ 4 files changed, 128 insertions(+) create mode 100644 test/nvmf/pmem/nvmf.conf create mode 100755 test/nvmf/pmem/nvmf_pmem.sh diff --git a/test/nvmf/nvmf.sh b/test/nvmf/nvmf.sh index c282af55f..651dc712c 100755 --- a/test/nvmf/nvmf.sh +++ b/test/nvmf/nvmf.sh @@ -30,10 +30,18 @@ run_test test/nvmf/nvme_cli/nvme_cli.sh run_test test/nvmf/lvol/nvmf_lvol.sh run_test test/nvmf/shutdown/shutdown.sh +if [ $SPDK_TEST_NVML -eq 1 ]; then + run_test test/nvmf/pmem/nvmf_pmem.sh 10 +fi + if [ $RUN_NIGHTLY -eq 1 ]; then run_test test/nvmf/multiconnection/multiconnection.sh fi +if [ $RUN_NIGHTLY -eq 1 ] && [ $SPDK_TEST_NVML -eq 1 ]; then + run_test test/nvmf/pmem/nvmf_pmem.sh 600 +fi + timing_enter host if [ $RUN_NIGHTLY -eq 1 ]; then diff --git a/test/nvmf/pmem/nvmf.conf b/test/nvmf/pmem/nvmf.conf new file mode 100644 index 000000000..b26bedd1f --- /dev/null +++ b/test/nvmf/pmem/nvmf.conf @@ -0,0 +1,8 @@ +[Global] + Comment "Global section" + +[Rpc] + Enable Yes + +[Nvmf] + MaxQueuesPerSession 16 diff --git a/test/nvmf/pmem/nvmf_pmem.sh b/test/nvmf/pmem/nvmf_pmem.sh new file mode 100755 index 000000000..3e0d43bb5 --- /dev/null +++ b/test/nvmf/pmem/nvmf_pmem.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +testdir=$(readlink -f $(dirname $0)) +rootdir=$(readlink -f $testdir/../../..) 
# Pull in SPDK test helpers (timing_enter/timing_exit, killprocess,
# waitforlisten) and nvmf test helpers (get_available_rdma_ips, nvmfcleanup).
source $rootdir/scripts/autotest_common.sh
source $rootdir/test/nvmf/common.sh

RUNTIME=$1          # FIO run time in seconds (10 for quick run, 600 for nightly)
PMEM_BDEVS=""       # space-separated list of pmem bdev names created below
SUBSYS_NR=1         # number of NVMe-oF subsystems to create
PMEM_PER_SUBSYS=8   # pmem bdevs (namespaces) per subsystem
# Intentionally expanded unquoted below so it word-splits into interpreter + script.
rpc_py="python $rootdir/scripts/rpc.py"

# Disconnect the kernel initiator from every subsystem. Each disconnect is
# followed by '|| true' so this stays safe to call from the error trap even
# when a subsystem was never connected.
function disconnect_nvmf()
{
	for i in $(seq 1 $SUBSYS_NR); do
		nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
	done
}

# Delete every pmem bdev and remove its backing pool file via RPC.
function clear_pmem_pool()
{
	for pmem in $PMEM_BDEVS; do   # intentionally unquoted: iterate the word list
		$rpc_py delete_bdev "$pmem"
	done

	for i in $(seq 1 $SUBSYS_NR); do
		for c in $(seq 1 $PMEM_PER_SUBSYS); do
			$rpc_py delete_pmem_pool /tmp/pool_file${i}_${c}
		done
	done
}

set -e

timing_enter nvmf_pmem

RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
# Quoted on purpose: the unquoted form '[ -z $var ]' collapses to '[ -z ]'
# for an empty value and only passes by accident (SC2086).
if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
	echo "no NIC for nvmf test"
	exit 0
fi

timing_enter start_nvmf_tgt
# Start up the NVMf target in another process.
# NOTE(review): this uses the shared $testdir/../nvmf.conf rather than the
# pmem-local nvmf.conf added alongside this script — confirm that is intended.
$NVMF_APP -c $testdir/../nvmf.conf &
pid=$!

# On interrupt or unexpected exit: detach initiators, drop the pool files,
# and kill the target so nothing leaks into the next test.
trap "disconnect_nvmf; rm -f /tmp/pool_file*; killprocess $pid; exit 1" SIGINT SIGTERM EXIT

waitforlisten "$pid" "${RPC_PORT}"
timing_exit start_nvmf_tgt

modprobe -v nvme-rdma

timing_enter setup
# Create pmem backends on each subsystem: one pool file per namespace
# (size 32, block size 512 — units per rpc.py create_pmem_pool), a pmem bdev
# on top of each pool, then one subsystem exposing all of them.
for i in $(seq 1 $SUBSYS_NR); do
	bdevs=""
	for c in $(seq 1 $PMEM_PER_SUBSYS); do
		$rpc_py create_pmem_pool /tmp/pool_file${i}_${c} 32 512
		bdevs+="$($rpc_py construct_pmem_bdev /tmp/pool_file${i}_${c}) "
	done
	$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -a -s SPDK$i -n "$bdevs"
	PMEM_BDEVS+=$bdevs
done
timing_exit setup

timing_enter nvmf_connect
# Attach the kernel initiator to every subsystem over RDMA.
for i in $(seq 1 $SUBSYS_NR); do
	nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
done
timing_exit nvmf_connect

timing_enter fio_test
# nvmf_fio.py args: block size (bytes), iodepth, workload, run time (s), verify flag.
$testdir/../fio/nvmf_fio.py 131072 64 randwrite $RUNTIME verify
timing_exit fio_test

sync
disconnect_nvmf

for i in $(seq 1 $SUBSYS_NR); do
	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
done
+
clear_pmem_pool

rm -f ./local-job*

trap - SIGINT SIGTERM EXIT

nvmfcleanup
killprocess $pid
timing_exit nvmf_pmem
diff --git a/test/nvmf/test_plan.md b/test/nvmf/test_plan.md
index 94347ef89..17ab423e8 100644
--- a/test/nvmf/test_plan.md
+++ b/test/nvmf/test_plan.md
@@ -36,6 +36,23 @@ quick test an 10 minutes for longer nightly test.
- Step 9: Disconnect kernel initiator from NVMe-oF subsystems.
- Step 10: Delete NVMe-oF subsystems from configuration.
+#### Test 2: NVMe-OF namespace on a Pmem device
+This test configures a SPDK NVMe-OF subsystem backed by pmem
+devices and uses FIO to generate I/Os that target those subsystems.
+Test steps:
+- Step 1: Assign IP addresses to RDMA NICs.
+- Step 2: Start SPDK nvmf_tgt application.
+- Step 3: Create NVMe-OF subsystem with 10 pmem bdev namespaces
+- Step 4: Repeat step 3 nine more times to get a total of 10 NVMe-OF subsystems,
+each with 10 pmem bdev namespaces.
+- Step 5: Connect to NVMe-OF subsystems with kernel initiator.
+- Step 6: Run FIO with workload parameters: blocksize=128kB, iodepth=16,
+ workload=randwrite; verify flag is enabled so that FIO reads and verifies
+ the data written to the pmem device. The run time is 10 seconds for a
+ quick test and 10 minutes for the longer nightly test.
+- Step 7: Disconnect kernel initiator from NVMe-OF subsystems.
+- Step 8: Delete NVMe-OF subsystems from configuration.
+
### Compatibility testing
- Verify functionality of SPDK `nvmf_tgt` with Linux kernel NVMe-oF host