This test has always relied on /dev/nvme0n1 being the location of our test
drive, which means it must be the only NVMe device in the system at the time
the test is run. So instead of trying to loop over multiple non-existent NVMe
drives, just call it out directly. Also, add a TODO to remove the dependence
on nvme0n1, since we could run this test on a machine where the main
filesystem is on an NVMe drive, which would break this test.

Change-Id: Ibb2448d9367a5c80d85a1c91b0c6e44e58237751
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/476952
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
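As context for that TODO, here is a minimal sketch (not part of the change) of the kind of guard the commit message hints at: skip the test when the root filesystem already sits on an NVMe drive. It assumes findmnt and lsblk are available on the test machine, and the skip behavior is only illustrative:

# Hypothetical guard, not used by the test below: bail out early if the root
# filesystem lives on an NVMe device, since the script assumes the only
# nvme0n1 in the system is the NVMe-oF namespace it just connected.
root_src=$(findmnt -n -o SOURCE /)
root_disk=$(lsblk -no PKNAME "$root_src" 2> /dev/null || true)
if [[ $root_disk == nvme* ]]; then
    echo "Root filesystem is on an NVMe drive; skipping filesystem test."
    exit 0
fi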
#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

rpc_py="$rootdir/scripts/rpc.py"

nvmftestinit
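
# Run the whole flow twice: once with in-capsule data disabled (0) and once
# with a 4096-byte in-capsule data size on the transport.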
for incapsule in 0 4096; do
    nvmfappstart "-m 0xF"
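
    # Set up the target over RPC: create the transport, back namespace 1 of
    # nqn.2016-06.io.spdk:cnode1 with a 64 MB malloc bdev, and listen on the
    # test address and port.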
    $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $incapsule

    $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
    $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
    $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
    $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
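
    # Connect the kernel host (initiator) to the subsystem exported above.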
    nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

    # TODO: fix this to wait for the proper NVMe device.
    # If we are hosting the local filesystem on an NVMe drive, this test will fail
    # because it relies on no other NVMe drives being present in the system.
    waitforblk "nvme0n1"
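
    # Illustrative sketch for the TODO above, not wired into the test: instead
    # of assuming nvme0, look up the controller whose subsystem NQN matches the
    # one we just connected and derive its first namespace from that, e.g.
    # waitforblk "$(block_dev_for_test_nqn)". The /sys/class/nvme/*/subsysnqn
    # attribute path is an assumption about the host kernel and should be
    # verified before relying on it.
    block_dev_for_test_nqn() {
        local ctrl
        for ctrl in /sys/class/nvme/nvme*; do
            if [ "$(cat "$ctrl/subsysnqn" 2> /dev/null)" = "nqn.2016-06.io.spdk:cnode1" ]; then
                echo "$(basename "$ctrl")n1"
                return 0
            fi
        done
        return 1
    }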
    mkdir -p /mnt/device
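
    # Partition the namespace: msdos label with a single primary partition
    # covering the whole device.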
    timing_enter parted
    parted -s /dev/nvme0n1 mklabel msdos mkpart primary '0%' '100%'
    partprobe
    timing_exit parted
    sleep 1
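
    # For each filesystem type, format the partition, mount it, create and
    # remove a file with syncs in between, and unmount.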
    for fstype in "ext4" "btrfs" "xfs"; do
        timing_enter $fstype
        if [ $fstype = ext4 ]; then
            force=-F
        else
            force=-f
        fi

        mkfs.${fstype} $force /dev/nvme0n1p1

        mount /dev/nvme0n1p1 /mnt/device
        touch /mnt/device/aaa
        sync
        rm /mnt/device/aaa
        sync
        umount /mnt/device
        timing_exit $fstype
    done
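
    # Tear down this iteration: drop the partition, disconnect the host, delete
    # the subsystem, and stop the target before the next in-capsule setting.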
    parted -s /dev/nvme0n1 rm 1

    sync
    nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true

    $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1

    trap - SIGINT SIGTERM EXIT

    killprocess $nvmfpid
done
nvmftestfini