diff --git a/test/nvmf/lvol/nvmf_lvol.sh b/test/nvmf/lvol/nvmf_lvol.sh
new file mode 100755
index 000000000..7db9bb74e
--- /dev/null
+++ b/test/nvmf/lvol/nvmf_lvol.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../../..)
+source $rootdir/scripts/autotest_common.sh
+source $rootdir/test/nvmf/common.sh
+
+MALLOC_BDEV_SIZE=128
+MALLOC_BLOCK_SIZE=512
+LVOL_BDEV_SIZE=10
+SUBSYS_NR=1
+
+rpc_py="python $rootdir/scripts/rpc.py"
+
+function disconnect_nvmf()
+{
+	for i in `seq 1 $SUBSYS_NR`; do
+		nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
+	done
+}
+
+set -e
+
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
+	echo "no NIC for nvmf test"
+	exit 0
+fi
+
+timing_enter lvol_integrity
+timing_enter start_nvmf_tgt
+# Start up the NVMf target in another process
+$NVMF_APP -c $testdir/../nvmf.conf &
+pid=$!
+
+trap "disconnect_nvmf; killprocess $pid; exit 1" SIGINT SIGTERM EXIT
+
+waitforlisten $pid ${RPC_PORT}
+timing_exit start_nvmf_tgt
+
+modprobe -v nvme-rdma
+
+lvol_stores=()
+lvol_bdevs=()
+
+# Create malloc backends and create an lvol store on each
+for i in `seq 1 $SUBSYS_NR`; do
+	bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+	ls_guid="$($rpc_py construct_lvol_store $bdev)"
+	lvol_stores+=("$ls_guid")
+
+	# 1 NVMe-OF subsystem per malloc bdev / lvol store / 10 lvol bdevs
+	ns_bdevs=""
+
+	# Create lvol bdevs on each lvol store
+	for j in `seq 1 10`; do
+		lb_guid="$($rpc_py construct_lvol_bdev $ls_guid $LVOL_BDEV_SIZE)"
+		lvol_bdevs+=("$lb_guid")
+		ns_bdevs+="$lb_guid "
+	done
+	$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -a -s SPDK$i -n "$ns_bdevs"
+done
+
+for i in `seq 1 $SUBSYS_NR`; do
+	nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+done
+
+$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10 verify
+
+sync
+disconnect_nvmf
+
+for i in `seq 1 $SUBSYS_NR`; do
+	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
+done
+
+rm -f ./local-job*
+
+trap - SIGINT SIGTERM EXIT
+
+nvmfcleanup
+killprocess $pid
+timing_exit lvol_integrity
diff --git a/test/nvmf/nvmf.sh b/test/nvmf/nvmf.sh
index 93e536bc1..d941bc0bd 100755
--- a/test/nvmf/nvmf.sh
+++ b/test/nvmf/nvmf.sh
@@ -27,6 +27,7 @@ run_test test/nvmf/fio/fio.sh
 run_test test/nvmf/filesystem/filesystem.sh
 run_test test/nvmf/discovery/discovery.sh
 run_test test/nvmf/nvme_cli/nvme_cli.sh
+run_test test/nvmf/lvol/nvmf_lvol.sh
 run_test test/nvmf/shutdown/shutdown.sh
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
diff --git a/test/nvmf/test_plan.md b/test/nvmf/test_plan.md
new file mode 100644
index 000000000..5167888c0
--- /dev/null
+++ b/test/nvmf/test_plan.md
@@ -0,0 +1,66 @@
+# SPDK nvmf_tgt test plan
+
+## Objective
+The purpose of these tests is to verify correct behavior of the SPDK NVMe-OF
+feature.
+These tests are run either per-commit or as nightly tests.
+
+## Configuration
+All tests share the same basic configuration file for SPDK nvmf_tgt to run.
+Static configuration from the config file consists of setting the number of
+per-session queues and enabling RPC for further configuration via RPC calls.
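+An illustrative sketch of such a config file follows; the section and key
+names are assumptions based on SPDK's legacy INI-style config format, not
+copied verbatim from this repository:
+
+```
+# Hypothetical sketch: enable the RPC server and cap per-session queues
+[Rpc]
+  Enable Yes
+
+[Nvmf]
+  MaxQueuesPerSession 4
+```
+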
+RPC calls used for dynamic configuration consist of:
+- creating Malloc backend devices
+- creating Null Block backend devices
+- constructing NVMe-OF subsystems
+- deleting NVMe-OF subsystems
+
+An illustrative mapping of these calls to concrete commands is sketched
+at the end of this plan.
+
+### Tests
+
+#### Test 1: NVMe-OF namespace on a Logical Volume device
+This test configures an SPDK NVMe-OF subsystem backed by logical volume
+devices and uses FIO to generate I/Os that target those subsystems.
+The logical volume bdevs are backed by malloc bdevs.
+Test steps:
+- Step 1: Assign IP addresses to RDMA NICs.
+- Step 2: Start SPDK nvmf_tgt application.
+- Step 3: Create malloc bdevs.
+- Step 4: Create logical volume stores on malloc bdevs.
+- Step 5: Create 10 logical volume bdevs on each logical volume store.
+- Step 6: Create NVMe-OF subsystems with logical volume bdev namespaces.
+- Step 7: Connect to NVMe-OF subsystems with kernel initiator.
+- Step 8: Run FIO with workload parameters: blocksize=256k, iodepth=64,
+workload=randwrite; the verify flag is enabled so that FIO reads back and
+verifies the data written to the logical device. The run time is 10 seconds
+for a quick test and 10 minutes for the longer nightly test.
+- Step 9: Disconnect kernel initiator from NVMe-OF subsystems.
+- Step 10: Delete NVMe-OF subsystems from configuration.
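+
+For reference, the dynamic configuration and connection steps above map onto
+rpc.py and nvme-cli invocations of the form used by the test script; the bdev
+name and the lvol store UUID below are illustrative placeholders:
+
+```
+# Placeholders: Malloc0, <lvs_uuid> and <target_ip> are illustrative
+python scripts/rpc.py construct_malloc_bdev 128 512
+python scripts/rpc.py construct_lvol_store Malloc0
+python scripts/rpc.py construct_lvol_bdev <lvs_uuid> 10
+nvme connect -t rdma -n nqn.2016-06.io.spdk:cnode1 -a <target_ip> -s 4420
+python scripts/rpc.py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
+```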