nvmf: Improve nvmf_lvol.sh

Simplify the test to run only on the RAID volume, but then also
perform several operations on the logical volume while I/O is
occurring in the background. This would have caught several
recently filed bugs.

Change-Id: If937a118ea034ce08d95b70fe74dc5f445cb1008
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452150
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ben Walker <benjamin.walker@intel.com>
Date: 2019-04-25 14:45:45 -07:00
Parent: e740ba637c
Commit: 219b62430d

nvmf_lvol.sh

@@ -7,19 +7,11 @@ source $rootdir/test/nvmf/common.sh
 
 MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
-LVOL_BDEV_SIZE=10
-SUBSYS_NR=2
-LVOL_BDEVS_NR=6
+LVOL_BDEV_INIT_SIZE=20
+LVOL_BDEV_FINAL_SIZE=30
 
 rpc_py="$rootdir/scripts/rpc.py"
 
-function disconnect_nvmf()
-{
-	for i in `seq 1 $SUBSYS_NR`; do
-		nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
-	done
-}
-
 set -e
 
 # pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
@@ -44,10 +36,10 @@ fi
 timing_enter lvol_integrity
 
 timing_enter start_nvmf_tgt
 # Start up the NVMf target in another process
-$NVMF_APP -m 0xF &
+$NVMF_APP -m 0x7 &
 pid=$!
 
-trap "process_shm --id $NVMF_APP_SHM_ID; disconnect_nvmf; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
 waitforlisten $pid
 $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
@@ -55,66 +47,41 @@ timing_exit start_nvmf_tgt
 modprobe -v nvme-rdma
 
-lvol_stores=
-lvol_bdevs=
-
-# Create the first LVS from a Raid-0 bdev, which is created from two malloc bdevs
-# Create remaining LVSs from a malloc bdev, respectively
-for i in `seq 1 $SUBSYS_NR`; do
-	if [ $i -eq 1 ]; then
-		# construct RAID bdev and put its name in $bdev
-		malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
-		malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
-		$rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$malloc_bdevs"
-		bdev="raid0"
-	else
-		# construct malloc bdev and put its name in $bdev
-		bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
-	fi
-
-	ls_guid="$($rpc_py construct_lvol_store $bdev lvs_$i -c 524288)"
-	lvol_stores+="lvs_$i "
-
-	# 1 NVMe-OF subsystem per malloc bdev / lvol store / 10 lvol bdevs
-	ns_bdevs=""
-
-	# Create lvol bdevs on each lvol store
-	for j in `seq 1 $LVOL_BDEVS_NR`; do
-		lb_name="$($rpc_py construct_lvol_bdev -u $ls_guid lbd_$j $LVOL_BDEV_SIZE)"
-		lvol_bdevs+="$lb_name "
-		ns_bdevs+="$lb_name "
-	done
-
-	$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
-	for bdev in $ns_bdevs; do
-		$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i $bdev
-	done
-	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
-done
-
-for i in `seq 1 $SUBSYS_NR`; do
-	k=$[$i-1]
-	nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
-
-	for j in `seq 1 $LVOL_BDEVS_NR`; do
-		waitforblk "nvme${k}n${j}"
-	done
-done
-
-$rootdir/scripts/fio.py nvmf 262144 64 randwrite 10 1 verify
-
-sync
-disconnect_nvmf
-
-for i in `seq 1 $SUBSYS_NR`; do
-	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
-done
-
-for lb_name in $lvol_bdevs; do
-	$rpc_py destroy_lvol_bdev "$lb_name"
-done
-
-for lvs in $lvol_stores; do
-	$rpc_py destroy_lvol_store -l $lvs
-done
+# Construct a RAID volume for the logical volume store
+base_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+base_bdevs+=$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
+$rpc_py construct_raid_bdev -n raid0 -z 64 -r 0 -b "$base_bdevs"
+
+# Create the logical volume store on the RAID volume
+lvs=$($rpc_py construct_lvol_store raid0 lvs)
+
+# Create a logical volume on the logical volume store
+lvol=$($rpc_py construct_lvol_bdev -u $lvs lvol $LVOL_BDEV_INIT_SIZE)
+
+# Create an NVMe-oF subsystem and add the logical volume as a namespace
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
+$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+# Start random writes in the background
+$rootdir/examples/nvme/perf/perf -r "trtype:RDMA adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 128 -s 512 -w randwrite -t 10 -c 0x18 &
+perf_pid=$!
+
+sleep 1
+
+# Perform some operations on the logical volume
+snapshot=$($rpc_py snapshot_lvol_bdev $lvol "MY_SNAPSHOT")
+$rpc_py resize_lvol_bdev $lvol $LVOL_BDEV_FINAL_SIZE
+clone=$($rpc_py clone_lvol_bdev $snapshot "MY_CLONE")
+$rpc_py inflate_lvol_bdev $clone
+
+# Wait for I/O to complete
+wait $perf_pid
+
+# Clean up
+$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode0
+$rpc_py destroy_lvol_bdev $lvol
+$rpc_py destroy_lvol_store -u $lvs
 
 rm -f ./local-job*
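
For anyone adapting this pattern to another test, a condensed sketch of the core technique the change introduces: keep the perf example driving random writes at the namespace in the background while lvol management RPCs run against the live volume. All paths, RPC names, and variables ($rootdir, $lvol, $NVMF_FIRST_TARGET_IP, $NVMF_PORT, $LVOL_BDEV_FINAL_SIZE) are taken from the diff above; this is a sketch under those assumptions, not a replacement for the test itself.

#!/usr/bin/env bash
# Sketch only: assumes the target is already configured as in the diff
# above, with $lvol set by construct_lvol_bdev and an NVMe-oF listener
# up on $NVMF_FIRST_TARGET_IP:$NVMF_PORT.
set -e
rpc_py="$rootdir/scripts/rpc.py"

# Keep 128 random writes in flight against the namespace for 10 seconds
$rootdir/examples/nvme/perf/perf \
	-r "trtype:RDMA adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" \
	-o 4096 -q 128 -w randwrite -t 10 &
perf_pid=$!
sleep 1 # give perf a moment to connect and start submitting I/O

# Management operations now race the in-flight writes
snapshot=$($rpc_py snapshot_lvol_bdev $lvol "MY_SNAPSHOT")
$rpc_py resize_lvol_bdev $lvol $LVOL_BDEV_FINAL_SIZE
clone=$($rpc_py clone_lvol_bdev $snapshot "MY_CLONE")
$rpc_py inflate_lvol_bdev $clone

# A regression typically surfaces here as perf reporting I/O errors
# or never exiting; set -e then fails the test
wait $perf_pid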