setup.sh: do not unbind NVMe devices with active mountpoints
Use lsblk to detect whether an NVMe namespace, or any partition on the
namespace, has an active mountpoint. If it does, do not unbind the NVMe
device associated with that NVMe namespace.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I1ab7540d640baa201efac49bc9515fd861dd8f8c
Reviewed-on: https://review.gerrithub.io/382479
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
parent
1028372837
commit
1a15ce9b46
@ -64,6 +64,19 @@ function linux_hugetlbfs_mount() {
|
||||
mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
|
||||
}
|
||||
|
||||
# Look up the kernel block device name (e.g. nvme0n1) backing the NVMe
# controller at PCI address $1 and assign it to the variable named by $2.
# If no matching block device exists, the output variable is left untouched.
#
# Arguments: $1 - PCI address (domain:bus:device.function)
#            $2 - name of the variable to receive the block device name
function get_nvme_name_from_bdf {
	local dev dev_bdf nvme_devs

	# grep exits non-zero when no nvme block devices exist; tolerate that
	# inline instead of toggling the script-wide set -e state.
	nvme_devs=$(lsblk -d --output NAME | grep "^nvme" || true)
	for dev in $nvme_devs; do
		# Resolve the PCI address of the controller owning this namespace.
		dev_bdf=$(basename "$(readlink "/sys/block/$dev/device/device")")
		if [ "$dev_bdf" = "$1" ]; then
			# printf -v assigns without eval, so $dev is never re-parsed as
			# shell code. Using a local for the comparison also fixes the
			# original's clobbering of the caller's global $bdf loop
			# variable, which could make configure_linux act on the wrong
			# PCI device after a non-matching lookup.
			printf -v "$2" '%s' "$dev"
			return
		fi
	done
}
|
||||
|
||||
function configure_linux {
|
||||
driver_name=vfio-pci
|
||||
if [ -z "$(ls /sys/kernel/iommu_groups)" ]; then
|
||||
@ -74,7 +87,18 @@ function configure_linux {
|
||||
# NVMe
|
||||
modprobe $driver_name || true
|
||||
for bdf in $(linux_iter_pci_class_code 0108); do
|
||||
linux_bind_driver "$bdf" "$driver_name"
|
||||
blkname=''
|
||||
get_nvme_name_from_bdf "$bdf" blkname
|
||||
if [ "$blkname" != "" ]; then
|
||||
mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
|
||||
else
|
||||
mountpoints="0"
|
||||
fi
|
||||
if [ "$mountpoints" = "0" ]; then
|
||||
linux_bind_driver "$bdf" "$driver_name"
|
||||
else
|
||||
echo Active mountpoints on /dev/$blkname, so not binding PCI dev $bdf
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
|
@ -10,8 +10,79 @@ function linux_iter_pci {
|
||||
lspci -mm -n -D | grep $1 | tr -d '"' | awk -F " " '{print $1}'
|
||||
}
|
||||
|
||||
# Test-script copy: find the kernel block device name for the NVMe controller
# at PCI address $1 and assign it to the variable named by $2. Leaves the
# output variable untouched when no match is found.
function get_nvme_name_from_bdf {
	local dev dev_bdf nvme_devs

	# Log the visible block devices to aid debugging test failures.
	lsblk -d --output NAME
	# Bail out early when no NVMe block devices exist at all. Quoting with
	# -z fixes the original unquoted `! [ $(...) ]`, which produced
	# "[: too many arguments" (and a spurious early return) whenever more
	# than one NVMe device was present.
	nvme_devs=$(lsblk -d --output NAME | grep "^nvme" || true)
	if [ -z "$nvme_devs" ]; then
		return
	fi
	for dev in $nvme_devs; do
		dev_bdf=$(basename "$(readlink "/sys/block/$dev/device/device")")
		if [ "$dev_bdf" = "$1" ]; then
			# Assign without eval so $dev is never re-parsed as shell code.
			printf -v "$2" '%s' "$dev"
			return
		fi
	done
}
|
||||
|
||||
timing_enter nvme

# Verify that setup.sh refuses to bind NVMe devices to uio/vfio while the
# namespace (or any partition on it) has an active mountpoint.
$rootdir/scripts/setup.sh reset
# Give the kernel nvme driver a moment to create the block devices before we
# start looking for them.
sleep 1

blkname=''
# First locate an NVMe namespace with no active mountpoint; this covers the
# rare case where this test runs on a system that already has a mounted NVMe
# filesystem.
#
# Note: more work probably needs to be done to properly handle devices with
# multiple namespaces.
for bdf in $(linux_iter_pci 0108); do
	get_nvme_name_from_bdf "$bdf" blkname
	if [ "$blkname" = "" ]; then
		continue
	fi
	mountpoints=$(lsblk /dev/$blkname --output MOUNTPOINT -n | wc -w)
	if [ "$mountpoints" = "0" ]; then
		# Found an unmounted namespace - use this one for the test.
		break
	fi
	blkname=''
done
|
||||
|
||||
# If an NVMe block device without an active mountpoint was found, create and
# mount a filesystem on it to exercise setup.sh's mountpoint detection.
if [ -n "$blkname" ]; then
	parted -s /dev/$blkname mklabel gpt
	# Create only a 100MB partition: this verifies mountpoint detection on a
	# partition of the device (not just the whole device) and is faster since
	# we don't trim and initialize the whole namespace.
	parted -s /dev/$blkname mkpart primary 1 100
	sleep 1
	mkdir -p /tmp/nvmetest
	mkfs.ext4 -F /dev/${blkname}p1
	mount /dev/${blkname}p1 /tmp/nvmetest
	$rootdir/scripts/setup.sh
	driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
	# The partition is mounted, so setup.sh must have left the kernel nvme
	# driver bound to the device.
	if [ "$driver" != "nvme" ]; then
		exit 1
	fi
	umount /tmp/nvmetest
	rmdir /tmp/nvmetest
	# Zero the start of the device to blow away the partition table and
	# filesystem signature.
	dd if=/dev/zero of=/dev/$blkname oflag=direct bs=1M count=1
	$rootdir/scripts/setup.sh
	driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
	# With nothing mounted, setup.sh must have unbound the kernel nvme driver.
	if [ "$driver" = "nvme" ]; then
		exit 1
	fi
else
	$rootdir/scripts/setup.sh
fi
|
||||
|
||||
if [ `uname` = Linux ]; then
|
||||
start_stub "-s 2048 -i 0 -m 0xF"
|
||||
trap "kill_stub; exit 1" SIGINT SIGTERM EXIT
|
||||
|
Loading…
Reference in New Issue
Block a user