From 581107c77a6b34d782c19d9a11e303c2eafe3df7 Mon Sep 17 00:00:00 2001
From: Daniel Verkamp
Date: Tue, 2 Jan 2018 14:52:43 -0700
Subject: [PATCH] test/nvme: only run setup.sh test on Linux

For now, our scripts don't support detecting mounted filesystems on
NVMe devices on FreeBSD, so only run those tests on Linux.

Change-Id: I558715bf4e95eaad2dd2d8a546626269c0837edb
Signed-off-by: Daniel Verkamp
Reviewed-on: https://review.gerrithub.io/393420
Tested-by: SPDK Automated Test System
Reviewed-by: Ben Walker
Reviewed-by: Jim Harris
---
 test/lib/nvme/nvme.sh | 106 +++++++++++++++++++++---------------------
 1 file changed, 54 insertions(+), 52 deletions(-)

diff --git a/test/lib/nvme/nvme.sh b/test/lib/nvme/nvme.sh
index d2d85ee2d..6a9e16028 100755
--- a/test/lib/nvme/nvme.sh
+++ b/test/lib/nvme/nvme.sh
@@ -31,60 +31,62 @@ function get_nvme_name_from_bdf {
 
 timing_enter nvme
 
-# check that our setup.sh script does not bind NVMe devices to uio/vfio if they
-# have an active mountpoint
-$rootdir/scripts/setup.sh reset
-# give kernel nvme driver some time to create the block devices before we start looking for them
-sleep 1
-blkname=''
-# first, find an NVMe device that does not have an active mountpoint already;
-# this covers rare case where someone is running this test script on a system
-# that has a mounted NVMe filesystem
-#
-# note: more work probably needs to be done to properly handle devices with multiple
-# namespaces
-for bdf in $(linux_iter_pci 0108); do
-	get_nvme_name_from_bdf "$bdf" blkname
-	if [ "$blkname" != "" ]; then
-		mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
-		if [ "$mountpoints" = "0" ]; then
-			break
-		else
-			blkname=''
-		fi
-	fi
-done
-
-# if we found an NVMe block device without an active mountpoint, create and mount
-# a filesystem on it for purposes of testing the setup.sh script
-if [ "$blkname" != "" ]; then
-	parted -s /dev/$blkname mklabel gpt
-	# just create a 100MB partition - this tests our ability to detect mountpoints
-	# on partitions of the device, not just the device itself; it also is faster
-	# since we don't trim and initialize the whole namespace
-	parted -s /dev/$blkname mkpart primary 1 100
+if [ `uname` = Linux ]; then
+	# check that our setup.sh script does not bind NVMe devices to uio/vfio if they
+	# have an active mountpoint
+	$rootdir/scripts/setup.sh reset
+	# give kernel nvme driver some time to create the block devices before we start looking for them
 	sleep 1
-	mkfs.ext4 -F /dev/${blkname}p1
-	mkdir -p /tmp/nvmetest
-	mount /dev/${blkname}p1 /tmp/nvmetest
-	$rootdir/scripts/setup.sh
-	driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
-	# check that the nvme driver is still loaded against the device
-	if [ "$driver" != "nvme" ]; then
-		exit 1
+	blkname=''
+	# first, find an NVMe device that does not have an active mountpoint already;
+	# this covers rare case where someone is running this test script on a system
+	# that has a mounted NVMe filesystem
+	#
+	# note: more work probably needs to be done to properly handle devices with multiple
+	# namespaces
+	for bdf in $(linux_iter_pci 0108); do
+		get_nvme_name_from_bdf "$bdf" blkname
+		if [ "$blkname" != "" ]; then
+			mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
+			if [ "$mountpoints" = "0" ]; then
+				break
+			else
+				blkname=''
+			fi
+		fi
+	done
+
+	# if we found an NVMe block device without an active mountpoint, create and mount
+	# a filesystem on it for purposes of testing the setup.sh script
+	if [ "$blkname" != "" ]; then
+		parted -s /dev/$blkname mklabel gpt
+		# just create a 100MB partition - this tests our ability to detect mountpoints
+		# on partitions of the device, not just the device itself; it also is faster
+		# since we don't trim and initialize the whole namespace
+		parted -s /dev/$blkname mkpart primary 1 100
+		sleep 1
+		mkfs.ext4 -F /dev/${blkname}p1
+		mkdir -p /tmp/nvmetest
+		mount /dev/${blkname}p1 /tmp/nvmetest
+		$rootdir/scripts/setup.sh
+		driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+		# check that the nvme driver is still loaded against the device
+		if [ "$driver" != "nvme" ]; then
+			exit 1
+		fi
+		umount /tmp/nvmetest
+		rmdir /tmp/nvmetest
+		# write zeroes to the device to blow away the partition table and filesystem
+		dd if=/dev/zero of=/dev/$blkname oflag=direct bs=1M count=1
+		$rootdir/scripts/setup.sh
+		driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
+		# check that the nvme driver is not loaded against the device
+		if [ "$driver" = "nvme" ]; then
+			exit 1
+		fi
+	else
+		$rootdir/scripts/setup.sh
 	fi
-	umount /tmp/nvmetest
-	rmdir /tmp/nvmetest
-	# write zeroes to the device to blow away the partition table and filesystem
-	dd if=/dev/zero of=/dev/$blkname oflag=direct bs=1M count=1
-	$rootdir/scripts/setup.sh
-	driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
-	# check that the nvme driver is not loaded against the device
-	if [ "$driver" = "nvme" ]; then
-		exit 1
-	fi
-else
-	$rootdir/scripts/setup.sh
 fi
 
 if [ `uname` = Linux ]; then