#!/usr/bin/env bash

set -e

os=$(uname -s)

if [[ $os != Linux && $os != FreeBSD ]]; then
	echo "Unsupported platform ($os), aborting"
	exit 1
fi

rootdir=$(readlink -f "$(dirname "$0")")/..
source "$rootdir/scripts/common.sh"

function usage() {
	if [[ $os == Linux ]]; then
		options="[config|reset|status|cleanup|help]"
	else
		options="[config|reset|help]"
	fi

	[[ -n $2 ]] && (
		echo "$2"
		echo ""
	)
	echo "Helper script for allocating hugepages and binding NVMe, I/OAT, VMD and Virtio devices"
	echo "to a generic VFIO kernel driver. If VFIO is not available on the system, this script"
	echo "will fall back to UIO. NVMe and Virtio devices with active mountpoints will be ignored."
	echo "All hugepage operations use the default hugepage size on the system (hugepagesz)."
	echo "Usage: $(basename $1) $options"
	echo
	echo "$options - as follows:"
	echo "config            Default mode. Allocate hugepages and bind PCI devices."
	if [[ $os == Linux ]]; then
		echo "cleanup           Remove any orphaned files that can be left in the system after SPDK application exit"
	fi
	echo "reset             Rebind PCI devices back to their original drivers."
	echo "                  Also clean up any leftover spdk files/resources."
	echo "                  Hugepage memory size will remain unchanged."
	if [[ $os == Linux ]]; then
		echo "status            Print status of all SPDK-compatible devices on the system."
	fi
	echo "help              Print this help message."
	echo
	echo "The following environment variables can be specified."
	echo "HUGEMEM           Size of hugepage memory to allocate (in MB). 2048 by default."
	echo "                  For NUMA systems, the hugepages will be evenly distributed"
	echo "                  between CPU nodes"
	echo "NRHUGE            Number of hugepages to allocate. This variable overrides HUGEMEM."
	echo "HUGENODE          Specific NUMA node to allocate hugepages on. To allocate"
	echo "                  hugepages on multiple nodes run this script multiple times -"
	echo "                  once for each node."
	echo "PCI_WHITELIST"
	echo "PCI_BLACKLIST     Whitespace-separated list of PCI devices (NVMe, I/OAT, VMD, Virtio)."
	echo "                  Each device must be specified as a full PCI address."
	echo "                  E.g. PCI_WHITELIST=\"0000:01:00.0 0000:02:00.0\""
	echo "                  To blacklist all PCI devices, use an invalid address."
	echo "                  E.g. PCI_WHITELIST=\"none\""
	echo "                  If PCI_WHITELIST and PCI_BLACKLIST are empty or unset, all PCI devices"
	echo "                  will be bound."
	echo "                  Each device in PCI_BLACKLIST will be ignored (driver won't be changed)."
	echo "                  PCI_BLACKLIST has precedence over PCI_WHITELIST."
	echo "TARGET_USER       User that will own hugepage mountpoint directory and vfio groups."
	echo "                  By default the current user will be used."
	echo "DRIVER_OVERRIDE   Disable automatic vfio-pci/uio_pci_generic selection and forcefully"
	echo "                  bind devices to the given driver."
	echo "                  E.g. DRIVER_OVERRIDE=uio_pci_generic or DRIVER_OVERRIDE=/home/public/dpdk/build/kmod/igb_uio.ko"
	exit 0
}

# In monolithic kernels, lsmod won't work, so fall back to /sys/module.
# We also check /sys/bus/pci/drivers/, as neither lsmod nor /sys/module
# might contain the needed info (as on Fedora-like systems).
function check_for_driver() {
	if lsmod | grep -q ${1//-/_}; then
		return 1
	fi

	if [[ -d /sys/module/${1} || -d \
		/sys/module/${1//-/_} || -d \
		/sys/bus/pci/drivers/${1} || -d \
		/sys/bus/pci/drivers/${1//-/_} ]]; then
		return 2
	fi
	return 0
}
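
# Callers rely on the inverted exit-code convention here: check_for_driver
# returns non-zero when the driver IS present (1 via lsmod, 2 via sysfs) and
# 0 when it is absent. A minimal usage sketch:
#   if ! check_for_driver "$driver"; then
#       linux_bind_driver "$bdf" "$driver"   # driver present, rebind to it
#   else
#       linux_unbind_driver "$bdf"           # driver absent, just unbind
#   fi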

function pci_dev_echo() {
	local bdf="$1"
	local vendor
	local device
	vendor="$(cat /sys/bus/pci/devices/$bdf/vendor)"
	device="$(cat /sys/bus/pci/devices/$bdf/device)"
	shift
	echo "$bdf (${vendor#0x} ${device#0x}): $*"
}
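
# Example output (vendor/device IDs illustrative):
#   pci_dev_echo 0000:01:00.0 "hello"  ->  "0000:01:00.0 (8086 0953): hello"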

function linux_bind_driver() {
	bdf="$1"
	driver_name="$2"
	old_driver_name="no driver"
	ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')

	if [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
		old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))

		if [ "$driver_name" = "$old_driver_name" ]; then
			pci_dev_echo "$bdf" "Already using the $old_driver_name driver"
			return 0
		fi

		echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
		echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
	fi

	pci_dev_echo "$bdf" "$old_driver_name -> $driver_name"

	echo "$ven_dev_id" > "/sys/bus/pci/drivers/$driver_name/new_id" 2> /dev/null || true
	echo "$bdf" > "/sys/bus/pci/drivers/$driver_name/bind" 2> /dev/null || true

	iommu_group=$(basename $(readlink -f /sys/bus/pci/devices/$bdf/iommu_group))
	if [ -e "/dev/vfio/$iommu_group" ]; then
		if [ -n "$TARGET_USER" ]; then
			chown "$TARGET_USER" "/dev/vfio/$iommu_group"
		fi
	fi
}
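
# For reference, the sysfs writes above amount to the following sequence
# (BDF and IDs illustrative):
#   echo "0000:01:00.0" > /sys/bus/pci/devices/0000:01:00.0/driver/unbind
#   echo "8086 0953"    > /sys/bus/pci/drivers/vfio-pci/new_id
#   echo "0000:01:00.0" > /sys/bus/pci/drivers/vfio-pci/bind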

function linux_unbind_driver() {
	local bdf="$1"
	local ven_dev_id
	ven_dev_id=$(lspci -n -s $bdf | cut -d' ' -f3 | sed 's/:/ /')
	local old_driver_name="no driver"

	if [ -e "/sys/bus/pci/devices/$bdf/driver" ]; then
		old_driver_name=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
		echo "$ven_dev_id" > "/sys/bus/pci/devices/$bdf/driver/remove_id" 2> /dev/null || true
		echo "$bdf" > "/sys/bus/pci/devices/$bdf/driver/unbind"
	fi

	pci_dev_echo "$bdf" "$old_driver_name -> no driver"
}

function linux_hugetlbfs_mounts() {
	mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
}

function get_block_dev_from_bdf() {
	local bdf=$1
	local block

	for block in /sys/block/*; do
		if [[ $(readlink -f "$block/device") == *"/$bdf/"* ]]; then
			echo "${block##*/}"
			return 0
		fi
	done
}

function get_mounted_part_dev_from_bdf_block() {
	local bdf=$1
	local blocks block part

	blocks=($(get_block_dev_from_bdf "$bdf"))

	for block in "${blocks[@]}"; do
		for part in "/sys/block/$block/$block"*; do
			if [[ $(< /proc/mounts) == *"/dev/${part##*/} "* ]]; then
				echo "${part##*/}"
			fi
		done
	done
}
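
# Example (device names illustrative): for an NVMe controller whose block
# device is nvme0n1 with nvme0n1p1 mounted, the function prints "nvme0n1p1";
# it prints nothing when no partition of the device appears in /proc/mounts.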

function collect_devices() {
	# NVMe, IOAT, IDXD, VIRTIO, VMD

	local ids dev_type dev_id bdf bdfs

	ids+="PCI_DEVICE_ID_INTEL_IOAT"
	ids+="|PCI_DEVICE_ID_INTEL_IDXD"
	ids+="|PCI_DEVICE_ID_VIRTIO"
	ids+="|PCI_DEVICE_ID_INTEL_VMD"
	ids+="|SPDK_PCI_CLASS_NVME"

	local -gA nvme_d ioat_d idxd_d virtio_d vmd_d all_devices_d

	while read -r _ dev_type dev_id; do
		bdfs=(${pci_bus_cache["0x8086:$dev_id"]})
		[[ $dev_type == *NVME* ]] && bdfs=(${pci_bus_cache["$dev_id"]})
		[[ $dev_type == *VIRT* ]] && bdfs=(${pci_bus_cache["0x1af4:$dev_id"]})
		[[ $dev_type =~ (NVME|IOAT|IDXD|VIRTIO|VMD) ]]
		for bdf in "${bdfs[@]}"; do
			eval "${BASH_REMATCH[1],,}_d[$bdf]=1"
			all_devices_d["$bdf"]=1
		done
	done < <(grep -E "$ids" "$rootdir/include/spdk/pci_ids.h")
}
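
# Sketch of the parsing above (pci_ids.h line layout assumed): a line such as
#   #define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
# is read as dev_type=PCI_DEVICE_ID_INTEL_IOAT_SNB0, dev_id=0x3c20; the regex
# match lowercases the captured family name, so every matching BDF from
# pci_bus_cache lands in ioat_d (and in all_devices_d).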

function collect_driver() {
	local bdf=$1
	local override_driver=$2
	local drivers driver

	[[ -e /sys/bus/pci/devices/$bdf/modalias ]] || return 1
	if drivers=($(modprobe -R "$(< "/sys/bus/pci/devices/$bdf/modalias")")); then
		# Pick first entry in case multiple aliases are bound to a driver.
		driver=$(readlink -f "/sys/module/${drivers[0]}/drivers/pci:"*)
		driver=${driver##*/}
	else
		driver=$override_driver
	fi 2> /dev/null
	echo "$driver"
}
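
# Illustrative resolution (module names assumed): for a standard NVMe
# controller, "modprobe -R" on the device's modalias typically resolves to
# the nvme module, and /sys/module/nvme/drivers/pci:nvme then yields the
# driver name "nvme". The override argument is only used when modalias
# resolution fails.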

function configure_linux_pci() {
	local driver_path=""
	driver_name=""
	if [[ -n "${DRIVER_OVERRIDE}" ]]; then
		driver_path="$DRIVER_OVERRIDE"
		driver_name="${DRIVER_OVERRIDE##*/}"
		# modprobe and the sysfs don't use the .ko suffix.
		driver_name=${driver_name%.ko}
		# path = name -> there is no path
		if [[ "$driver_path" = "$driver_name" ]]; then
			driver_path=""
		fi
		# igb_uio is a common driver to override with and it depends on uio.
		if [[ "$driver_name" = "igb_uio" ]]; then
			modprobe uio
		fi
	elif [[ -n "$(ls /sys/kernel/iommu_groups)" || (-e \
		/sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
		"$(cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode)" == "Y") ]]; then
		driver_name=vfio-pci
	elif modinfo uio_pci_generic > /dev/null 2>&1; then
		driver_name=uio_pci_generic
	elif [[ -r "$rootdir/dpdk/build/kmod/igb_uio.ko" ]]; then
		driver_path="$rootdir/dpdk/build/kmod/igb_uio.ko"
		driver_name="igb_uio"
		modprobe uio
		echo "WARNING: uio_pci_generic not detected - using $driver_name"
	else
		echo "No valid drivers found [vfio-pci, uio_pci_generic, igb_uio]. Please either enable the vfio-pci or uio_pci_generic"
		echo "kernel modules, or have SPDK build the igb_uio driver by running ./configure --with-igb-uio-driver and recompiling."
		return 1
	fi

	# modprobe looks modules up by name in the standard module directories.
	# If the user passed in a path, load it with insmod instead.
	if [[ -n "$driver_path" ]]; then
		insmod $driver_path || true
	else
		modprobe $driver_name
	fi
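
	# For example (paths illustrative):
	#   DRIVER_OVERRIDE=igb_uio          -> modprobe igb_uio
	#   DRIVER_OVERRIDE=/tmp/igb_uio.ko  -> insmod /tmp/igb_uio.ko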

	# NVMe
	for bdf in ${pci_bus_cache["0x010802"]}; do
		blknames=($(get_mounted_part_dev_from_bdf_block "$bdf"))
		if ! pci_can_use $bdf; then
			pci_dev_echo "$bdf" "Skipping un-whitelisted NVMe controller"
			continue
		fi

		if ((${#blknames[@]} == 0)); then
			linux_bind_driver "$bdf" "$driver_name"
		else
			for name in "${blknames[@]}"; do
				pci_dev_echo "$bdf" "Active mountpoints on /dev/$name, so not binding PCI dev"
			done
		fi
	done

	# IOAT
	TMP=$(mktemp)
	# Collect all the device_id info of IOAT devices.
	grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
				continue
			fi

			linux_bind_driver "$bdf" "$driver_name"
		done
	done < $TMP
	rm $TMP
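
	# Note on the awk -F"x" idiom used throughout: splitting on "x" turns an
	# (assumed) pci_ids.h line like
	#   #define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
	# into field 2 = "3c20", the bare hex ID used for the pci_bus_cache lookup.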

	# IDXD
	TMP=$(mktemp)
	# Collect all the device_id info of IDXD devices.
	grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
				continue
			fi

			linux_bind_driver "$bdf" "$driver_name"
		done
	done < $TMP
	rm $TMP

	# virtio
	TMP=$(mktemp)
	# Collect all the device_id info of virtio devices.
	grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device"
				continue
			fi
			blknames=($(get_mounted_part_dev_from_bdf_block "$bdf"))
			if ((${#blknames[@]} > 0)); then
				pci_dev_echo "$bdf" "Active mountpoints on ${blknames[*]}, so not binding"
				continue
			fi

			linux_bind_driver "$bdf" "$driver_name"
		done
	done < $TMP
	rm $TMP

	# VMD
	TMP=$(mktemp)
	# Collect all the device_id info of VMD devices.
	grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if [[ -z "$PCI_WHITELIST" ]] || ! pci_can_use $bdf; then
				echo "Skipping un-whitelisted VMD device at $bdf"
				continue
			fi

			linux_bind_driver "$bdf" "$driver_name"
			echo " VMD generic kdrv: " "$bdf" "$driver_name"
		done
	done < $TMP
	rm $TMP

	echo "1" > "/sys/bus/pci/rescan"
}

function cleanup_linux() {
	shopt -s extglob nullglob
	dirs_to_clean=""
	dirs_to_clean="$(echo {/var/run,/tmp}/dpdk/spdk{,_pid}+([0-9])) "
	if [[ -d $XDG_RUNTIME_DIR && $XDG_RUNTIME_DIR != *" "* ]]; then
		dirs_to_clean+="$(readlink -e assert_not_empty $XDG_RUNTIME_DIR/dpdk/spdk{,_pid}+([0-9]) || true) "
	fi

	files_to_clean=""
	for dir in $dirs_to_clean; do
		files_to_clean+="$(echo $dir/*) "
	done
	shopt -u extglob nullglob

	files_to_clean+="$(ls -1 /dev/shm/* \
		| grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
	# "assert_not_empty" is a deliberately nonexistent path: it guarantees
	# readlink always has an operand even when the list is empty, and
	# nonexistent entries are simply dropped from the resolved output.
	files_to_clean="$(readlink -e assert_not_empty $files_to_clean || true)"
	if [[ -z "$files_to_clean" ]]; then
		echo "Clean"
		return 0
	fi

	shopt -s extglob
	for fd_dir in $(echo /proc/+([0-9])); do
		opened_files+="$(readlink -e assert_not_empty $fd_dir/fd/* || true)"
	done
	shopt -u extglob

	if [[ -z "$opened_files" ]]; then
		echo "Can't get list of opened files!"
		exit 1
	fi

	echo 'Cleaning'
	for f in $files_to_clean; do
		if ! echo "$opened_files" | grep -E -q "^$f\$"; then
			echo "Removing: $f"
			rm $f
		else
			echo "Still open: $f"
		fi
	done

	for dir in $dirs_to_clean; do
		if ! echo "$opened_files" | grep -E -q "^$dir\$"; then
			echo "Removing: $dir"
			rmdir $dir
		else
			echo "Still open: $dir"
		fi
	done
	echo "Clean"

	unset dirs_to_clean files_to_clean opened_files
}
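
# Illustrative targets of cleanup (names assumed): directories such as
# /var/run/dpdk/spdk0 or /tmp/dpdk/spdk_pid1234, and shm files such as
# /dev/shm/spdk_tgt_trace.pid1234 - each is removed only if no running
# process still holds it open.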

function configure_linux() {
	configure_linux_pci
	hugetlbfs_mounts=$(linux_hugetlbfs_mounts)

	if [ -z "$hugetlbfs_mounts" ]; then
		hugetlbfs_mounts=/mnt/huge
		echo "Mounting hugetlbfs at $hugetlbfs_mounts"
		mkdir -p "$hugetlbfs_mounts"
		mount -t hugetlbfs nodev "$hugetlbfs_mounts"
	fi

	if [ -z "$HUGENODE" ]; then
		hugepages_target="/proc/sys/vm/nr_hugepages"
	else
		hugepages_target="/sys/devices/system/node/node${HUGENODE}/hugepages/hugepages-${HUGEPGSZ}kB/nr_hugepages"
	fi

	echo "$NRHUGE" > "$hugepages_target"
	allocated_hugepages=$(cat $hugepages_target)
	if [ "$allocated_hugepages" -lt "$NRHUGE" ]; then
		echo ""
		echo "## ERROR: requested $NRHUGE hugepages but only $allocated_hugepages could be allocated."
		echo "## Memory might be heavily fragmented. Please try flushing the system cache, or reboot the machine."
		exit 1
	fi
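
	# Illustrative targets (values assumed): without HUGENODE the write goes
	# to /proc/sys/vm/nr_hugepages; with HUGENODE=1 and 2048 kB pages it goes
	# to /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages.
	# E.g. NRHUGE=1024 HUGENODE=1 ./setup.sh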

	if [ "$driver_name" = "vfio-pci" ]; then
		if [ -n "$TARGET_USER" ]; then
			for mount in $hugetlbfs_mounts; do
				chown "$TARGET_USER" "$mount"
				chmod g+w "$mount"
			done

			MEMLOCK_AMNT=$(su "$TARGET_USER" -c "ulimit -l")
			if [[ $MEMLOCK_AMNT != "unlimited" ]]; then
				MEMLOCK_MB=$((MEMLOCK_AMNT / 1024))
				cat <<- MEMLOCK
					"$TARGET_USER" user memlock limit: $MEMLOCK_MB MB

					This is the maximum amount of memory you will be
					able to use with DPDK and VFIO if run as user "$TARGET_USER".
					To change this, please adjust limits.conf memlock limit for user "$TARGET_USER".
				MEMLOCK
				if ((MEMLOCK_AMNT < 65536)); then
					echo ""
					echo "## WARNING: memlock limit is less than 64MB"
					echo -n "## DPDK with VFIO may not be able to initialize "
					echo "if run as user \"$TARGET_USER\"."
				fi
			fi
		fi
	fi

	if [ ! -f /dev/cpu/0/msr ]; then
		# Some distros build msr as a module. Make sure it's loaded to ensure
		# DPDK can easily figure out the TSC rate rather than relying on 100ms
		# sleeps.
		modprobe msr || true
	fi
}
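
# Example /etc/security/limits.conf entries to raise the memlock limit
# (user name illustrative):
#   spdkuser    hard    memlock    unlimited
#   spdkuser    soft    memlock    unlimited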

function reset_linux_pci() {
	# NVMe
	for bdf in ${pci_bus_cache["0x010802"]}; do
		if ! pci_can_use $bdf; then
			pci_dev_echo "$bdf" "Skipping un-whitelisted NVMe controller"
			continue
		fi
		driver=$(collect_driver "$bdf")
		if ! check_for_driver "$driver"; then
			linux_bind_driver "$bdf" "$driver"
		else
			linux_unbind_driver "$bdf"
		fi
	done

	# IOAT
	TMP=$(mktemp)
	# Collect all the device_id info of IOAT devices.
	grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
				continue
			fi
			driver=$(collect_driver "$bdf")
			if ! check_for_driver "$driver"; then
				linux_bind_driver "$bdf" "$driver"
			else
				linux_unbind_driver "$bdf"
			fi
		done
	done < $TMP
	rm $TMP

	# IDXD
	TMP=$(mktemp)
	# Collect all the device_id info of IDXD devices.
	grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP
	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
				continue
			fi
			driver=$(collect_driver "$bdf")
			if ! check_for_driver "$driver"; then
				linux_bind_driver "$bdf" "$driver"
			else
				linux_unbind_driver "$bdf"
			fi
		done
	done < $TMP
	rm $TMP

	# virtio
	TMP=$(mktemp)
	# Collect all the device_id info of virtio devices.
	grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP
	# TODO: check if virtio-pci is loaded first and just unbind if it is not loaded
	# Requires some more investigation - for example, some kernels do not seem to have
	# virtio-pci but just virtio_scsi instead. Also need to make sure we get the
	# underscore vs. dash right in the virtio_scsi name.
	modprobe virtio-pci || true
	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device"
				continue
			fi
			driver=$(collect_driver "$bdf" virtio-pci)
			if ! check_for_driver "$driver"; then
				linux_bind_driver "$bdf" "$driver"
			fi
		done
	done < $TMP
	rm $TMP

	# VMD
	TMP=$(mktemp)
	# Collect all the device_id info of VMD devices.
	grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}' > $TMP

	while IFS= read -r dev_id; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			if ! pci_can_use $bdf; then
				echo "Skipping un-whitelisted VMD device at $bdf"
				continue
			fi
			driver=$(collect_driver "$bdf")
			if ! check_for_driver "$driver"; then
				linux_bind_driver "$bdf" "$driver"
			fi
		done
	done < $TMP
	rm $TMP

	echo "1" > "/sys/bus/pci/rescan"
}

function reset_linux() {
	reset_linux_pci
	for mount in $(linux_hugetlbfs_mounts); do
		rm -f "$mount"/spdk*map_*
	done
	rm -f /run/.spdk*
}

function status_linux() {
	echo "Hugepages"
	printf "%-6s %10s %8s / %6s\n" "node" "hugesize" "free" "total"

	numa_nodes=0
	shopt -s nullglob
	for path in /sys/devices/system/node/node*/hugepages/hugepages-*/; do
		numa_nodes=$((numa_nodes + 1))
		free_pages=$(cat $path/free_hugepages)
		all_pages=$(cat $path/nr_hugepages)

		[[ $path =~ (node[0-9]+)/hugepages/hugepages-([0-9]+kB) ]]

		node=${BASH_REMATCH[1]}
		huge_size=${BASH_REMATCH[2]}

		printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
	done
	shopt -u nullglob

	# fall back to system-wide hugepages
	if [ "$numa_nodes" = "0" ]; then
		free_pages=$(grep HugePages_Free /proc/meminfo | awk '{ print $2 }')
		all_pages=$(grep HugePages_Total /proc/meminfo | awk '{ print $2 }')
		node="-"
		huge_size="$HUGEPGSZ"

		printf "%-6s %10s %8s / %6s\n" $node $huge_size $free_pages $all_pages
	fi
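
	# Example of the table printed above (values illustrative):
	#   node     hugesize     free /  total
	#   node0      2048kB      512 /   1024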

	echo ""
	echo "NVMe devices"

	echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
	for bdf in ${pci_bus_cache["0x010802"]}; do
		driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
		if [ "$numa_nodes" = "0" ]; then
			node="-"
		else
			node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
			if ((node == -1)); then
				node=unknown
			fi
		fi
		device=$(cat /sys/bus/pci/devices/$bdf/device)
		vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
		if [ "$driver" = "nvme" ] && [ -d /sys/bus/pci/devices/$bdf/nvme ]; then
			name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme)
		else
			name="-"
		fi
		echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name"
	done

	echo ""
	echo "I/OAT Engine"

	# Collect all the device_id info of IOAT devices.
	TMP=$(grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}')
	echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
	for dev_id in $TMP; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
			if [ "$numa_nodes" = "0" ]; then
				node="-"
			else
				node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
				if ((node == -1)); then
					node=unknown
				fi
			fi
			device=$(cat /sys/bus/pci/devices/$bdf/device)
			vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
			echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}"
		done
	done
2017-05-30 21:13:50 +00:00
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
echo ""
|
2020-04-29 23:11:10 +00:00
|
|
|
echo "IDXD Engine"
|
2020-05-07 11:27:06 +00:00
|
|
|
|
|
|
|
#collect all the device_id info of idxd devices.
|
|
|
|
TMP=$(grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
|
|
|
|
| awk -F"x" '{print $2}')
|
|
|
|
echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
|
|
|
|
for dev_id in $TMP; do
|
|
|
|
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
|
|
|
|
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
|
|
|
|
if [ "$numa_nodes" = "0" ]; then
|
|
|
|
node="-"
|
|
|
|
else
|
|
|
|
node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
|
|
|
|
fi
|
|
|
|
device=$(cat /sys/bus/pci/devices/$bdf/device)
|
|
|
|
vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
|
|
|
|
echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}"
|
|
|
|
done
|
|
|
|
done
|

	echo ""
	echo "virtio"

	# Collect all the device_id info of virtio devices.
	TMP=$(grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}')
	echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
	for dev_id in $TMP; do
		for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
			driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
			if [ "$numa_nodes" = "0" ]; then
				node="-"
			else
				node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
				if ((node == -1)); then
					node=unknown
				fi
			fi
			device=$(cat /sys/bus/pci/devices/$bdf/device)
			vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
			blknames=($(get_mounted_part_dev_from_bdf_block "$bdf"))
			echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t\t${driver:--}\t\t" "${blknames[@]}"
		done
	done

	echo ""
	echo "VMD"

	# Collect all the device_id info of VMD devices.
	TMP=$(grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
		| awk -F"x" '{print $2}')
	echo -e "BDF\t\tNuma Node\tDriver Name"
	for dev_id in $TMP; do
		for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
			driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
			node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
			if ((node == -1)); then
				node=unknown
			fi
			echo -e "$bdf\t$node\t\t$driver"
		done
	done
}

function status_freebsd() {
	local id pci
	local ioat idxd vmd

	status_print() (
		local dev driver

		echo -e "BDF\t\tVendor\tDevice\tDriver"

		for id; do
			for pci in ${pci_bus_cache["$id"]}; do
				driver=$(pciconf -l "pci$pci")
				driver=${driver%@*}
				printf '%s\t%s\t%s\t%s\n' \
					"$pci" \
					"${pci_ids_vendor["$pci"]}" \
					"${pci_ids_device["$pci"]}" \
					"$driver"
			done
		done
	)

	devs=PCI_DEVICE_ID_INTEL_IOAT
	devs+="|PCI_DEVICE_ID_INTEL_IDXD"
	devs+="|PCI_DEVICE_ID_INTEL_VMD"

	local dev_type dev_id
	while read -r _ dev_type dev_id; do
		case "$dev_type" in
			*IOAT*) ioat+=("0x8086:$dev_id") ;;
			*IDXD*) idxd+=("0x8086:$dev_id") ;;
			*VMD*) vmd+=("0x8086:$dev_id") ;;
		esac
	done < <(grep -E "$devs" "$rootdir/include/spdk/pci_ids.h")

	local contigmem=present
	if ! kldstat -q -m contigmem; then
		contigmem="not present"
	fi

	cat <<- BSD_INFO
		Contigmem ($contigmem)
		Buffer Size: $(kenv hw.contigmem.buffer_size)
		Num Buffers: $(kenv hw.contigmem.num_buffers)

		NVMe devices
		$(status_print 0x010802)

		I/OAT DMA
		$(status_print "${ioat[@]}")

		IDXD DMA
		$(status_print "${idxd[@]}")

		VMD
		$(status_print "${vmd[@]}")
	BSD_INFO
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
function configure_freebsd_pci() {
|
scripts/common: Introduce cache for the pci devices
Expose a cache of pci devices in form of an assoc array that could be
looked up during the runtime of a script like setup.sh.
In case of setup.sh, caching speeds up execution quite visibly:
config run, no caching:
real 0m4.488s
user 0m1.440s
sys 0m1.260s
config run, caching in use:
real 0m2.876s
user 0m0.365s
sys 0m0.420s
Note that for initial config runs, binding controllers to proper
drivers is the actual bottleneck.
status run, no caching:
real 0m1.877s
user 0m1.252s
sys 0m0.984s
status run, caching in use:
real 0m0.371s
user 0m0.242s
sys 0m0.204s
reset run, no caching:
real 0m2.559s
user 0m1.409s
sys 0m1.322s
reset run, caching in use:
real 0m0.960s
user 0m0.432s
sys 0m0.419s
Additionally, in case common tools, e.g. lspci, are missing, fallback to
sysfs to pick all needed devices from the pci bus. Targeted for Linux
systems only.
Change-Id: Ib69ef724b9f09eca0cbb9b88f1c363edc1efd5dc
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1845
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
2020-04-14 13:43:32 +00:00
|
|
|
local devs ids id
|
|
|
|
local BDFS
|
2016-08-08 22:57:49 +00:00
|
|
|
|
scripts/common: Introduce cache for the pci devices
Expose a cache of pci devices in form of an assoc array that could be
looked up during the runtime of a script like setup.sh.
In case of setup.sh, caching speeds up execution quite visibly:
config run, no caching:
real 0m4.488s
user 0m1.440s
sys 0m1.260s
config run, caching in use:
real 0m2.876s
user 0m0.365s
sys 0m0.420s
Note that for initial config runs, binding controllers to proper
drivers is the actual bottleneck.
status run, no caching:
real 0m1.877s
user 0m1.252s
sys 0m0.984s
status run, caching in use:
real 0m0.371s
user 0m0.242s
sys 0m0.204s
reset run, no caching:
real 0m2.559s
user 0m1.409s
sys 0m1.322s
reset run, caching in use:
real 0m0.960s
user 0m0.432s
sys 0m0.419s
Additionally, in case common tools, e.g. lspci, are missing, fallback to
sysfs to pick all needed devices from the pci bus. Targeted for Linux
systems only.
Change-Id: Ib69ef724b9f09eca0cbb9b88f1c363edc1efd5dc
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1845
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
2020-04-14 13:43:32 +00:00
|
|
|
devs=PCI_DEVICE_ID_INTEL_IOAT
|
|
|
|
devs+="|PCI_DEVICE_ID_INTEL_IDXD"
|
|
|
|
devs+="|PCI_DEVICE_ID_INTEL_VMD"
|
2016-08-08 22:57:49 +00:00
|
|
|
|
scripts/common: Introduce cache for the pci devices
Expose a cache of pci devices in form of an assoc array that could be
looked up during the runtime of a script like setup.sh.
In case of setup.sh, caching speeds up execution quite visibly:
config run, no caching:
real 0m4.488s
user 0m1.440s
sys 0m1.260s
config run, caching in use:
real 0m2.876s
user 0m0.365s
sys 0m0.420s
Note that for initial config runs, binding controllers to proper
drivers is the actual bottleneck.
status run, no caching:
real 0m1.877s
user 0m1.252s
sys 0m0.984s
status run, caching in use:
real 0m0.371s
user 0m0.242s
sys 0m0.204s
reset run, no caching:
real 0m2.559s
user 0m1.409s
sys 0m1.322s
reset run, caching in use:
real 0m0.960s
user 0m0.432s
sys 0m0.419s
Additionally, in case common tools, e.g. lspci, are missing, fallback to
sysfs to pick all needed devices from the pci bus. Targeted for Linux
systems only.
Change-Id: Ib69ef724b9f09eca0cbb9b88f1c363edc1efd5dc
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1845
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
2020-04-14 13:43:32 +00:00
|
|
|
ids=($(grep -E "$devs" "$rootdir/include/spdk/pci_ids.h" | awk '{print $3}'))
|
2016-08-08 22:57:49 +00:00
|
|
|
|
2020-04-14 13:43:32 +00:00
|
|
|
if [[ -n ${pci_bus_cache["0x010802"]} ]]; then
|
|
|
|
BDFS+=(${pci_bus_cache["0x010802"]})
|
|
|
|
fi
|
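0x010802 is the PCI class code for NVMe controllers: base class 0x01 (mass
storage), subclass 0x08 (non-volatile memory), prog-if 0x02 (NVM Express). A
minimal sketch of the shape the pci_bus_cache associative array is assumed to
have, keyed by class code as well as by vendor:device pair (the BDFs and the
device ID below are illustrative):

    declare -A pci_bus_cache
    # All NVMe controllers, keyed by full class code:
    pci_bus_cache["0x010802"]="0000:01:00.0 0000:02:00.0"
    # A single Intel (vendor 0x8086) device, keyed by vendor:device:
    pci_bus_cache["0x8086:0x2021"]="0000:03:00.0"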
2019-05-24 09:04:51 +00:00
|
|
|
|
2020-04-14 13:43:32 +00:00
|
|
|
for id in "${ids[@]}"; do
|
|
|
|
[[ -n ${pci_bus_cache["0x8086:$id"]} ]] || continue
|
|
|
|
BDFS+=(${pci_bus_cache["0x8086:$id"]})
|
|
|
|
done
|
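With a cache of that shape, collecting the Intel devices becomes pure
in-memory lookup, no lspci subprocess per ID; IDs without a cache entry are
skipped by the || continue guard. Continuing the illustrative cache from the
sketch above:

    ids=(0x2021 0x201d)
    BDFS=()
    for id in "${ids[@]}"; do
        [[ -n ${pci_bus_cache["0x8086:$id"]} ]] || continue
        BDFS+=(${pci_bus_cache["0x8086:$id"]})
    done
    echo "${BDFS[@]}"   # prints: 0000:03:00.0 (only 0x2021 had an entry)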
2016-08-08 22:57:49 +00:00
|
|
|
|
2020-04-14 13:43:32 +00:00
|
|
|
# Drop the domain part from all the addresses
|
|
|
|
BDFS=("${BDFS[@]#*:}")
|
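${name#pattern} strips the shortest leading match of pattern, so #*: removes
everything up to and including the first colon, i.e. the PCI domain, which
FreeBSD's nic_uio does not expect in its bdfs list. A quick illustration:

    BDFS=(0000:01:00.0 0000:02:00.0)
    BDFS=("${BDFS[@]#*:}")
    echo "${BDFS[@]}"   # prints: 01:00.0 02:00.0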
2016-08-08 22:57:49 +00:00
|
|
|
|
2020-04-14 13:43:32 +00:00
|
|
|
local IFS=","
|
2016-02-19 21:11:08 +00:00
|
|
|
kldunload nic_uio.ko || true
|
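The script runs with set -e, so a bare kldunload failure, e.g. when nic_uio
was never loaded, would abort the whole run; || true masks the non-zero exit
status and keeps the teardown idempotent:

    set -e
    false || true   # non-zero status masked, so set -e does not terminate the script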
2020-04-14 13:43:32 +00:00
|
|
|
kenv hw.nic_uio.bdfs="${BDFS[*]}"
|
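"${BDFS[*]}" joins the array elements with the first character of IFS, which
the earlier local IFS="," set to a comma, yielding the comma-separated list
the nic_uio tunable expects. For example:

    BDFS=(01:00.0 02:00.0)
    IFS=","
    echo "${BDFS[*]}"   # prints: 01:00.0,02:00.0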
2016-02-19 21:11:08 +00:00
|
|
|
kldload nic_uio.ko
|
2017-11-14 20:05:47 +00:00
|
|
|
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
function configure_freebsd() {
|
2018-01-23 14:57:26 +00:00
|
|
|
configure_freebsd_pci
|
2018-05-23 15:48:46 +00:00
|
|
|
# If contigmem is already loaded but the HUGEMEM specified doesn't match the
|
|
|
|
# previous value, unload contigmem so that we can reload with the new value.
|
|
|
|
if kldstat -q -m contigmem; then
|
2019-07-03 15:12:39 +00:00
|
|
|
if [ "$(kenv hw.contigmem.num_buffers)" -ne "$((HUGEMEM / 256))" ]; then
|
2018-05-23 15:48:46 +00:00
|
|
|
kldunload contigmem.ko
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
if ! kldstat -q -m contigmem; then
|
|
|
|
kenv hw.contigmem.num_buffers=$((HUGEMEM / 256))
|
|
|
|
kenv hw.contigmem.buffer_size=$((256 * 1024 * 1024))
|
|
|
|
kldload contigmem.ko
|
|
|
|
fi
|
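contigmem hands out physically contiguous 256 MB buffers instead of
Linux-style hugepages, so the buffer count is just HUGEMEM divided by 256;
the preceding guard reloads the module only when that count changed. With
the default HUGEMEM of 2048 MB:

    HUGEMEM=2048
    echo $((HUGEMEM / 256))       # 8 buffers
    echo $((256 * 1024 * 1024))   # each 268435456 bytes (256 MB)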
2016-02-19 21:11:08 +00:00
|
|
|
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
function reset_freebsd() {
|
2016-02-19 21:11:08 +00:00
|
|
|
kldunload contigmem.ko || true
|
2018-01-23 14:57:26 +00:00
|
|
|
kldunload nic_uio.ko || true
|
2016-02-19 21:11:08 +00:00
|
|
|
}
|
|
|
|
|
2020-04-14 13:43:32 +00:00
|
|
|
CMD=reset cache_pci_bus
|
|
|
|
|
2018-01-15 19:08:37 +00:00
|
|
|
mode=$1
|
2016-04-06 03:03:28 +00:00
|
|
|
|
2018-01-15 19:08:37 +00:00
|
|
|
if [ -z "$mode" ]; then
|
2016-02-19 21:11:08 +00:00
|
|
|
mode="config"
|
|
|
|
fi
|
|
|
|
|
2017-08-30 18:20:22 +00:00
|
|
|
: ${HUGEMEM:=2048}
|
2018-01-23 14:07:10 +00:00
|
|
|
: ${PCI_WHITELIST:=""}
|
2019-02-01 09:10:17 +00:00
|
|
|
: ${PCI_BLACKLIST:=""}
|
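The three lines above use the stock shell default-value idiom: : is a no-op
whose only job is to force the ${VAR:=default} expansion, which assigns the
default only when the variable is unset or empty. For instance:

    unset HUGEMEM
    : ${HUGEMEM:=2048}
    echo "$HUGEMEM"   # prints: 2048
    HUGEMEM=4096
    : ${HUGEMEM:=2048}
    echo "$HUGEMEM"   # prints: 4096 (already set, default left alone)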
2018-01-23 14:07:10 +00:00
|
|
|
|
|
|
|
if [ -n "$NVME_WHITELIST" ]; then
|
|
|
|
PCI_WHITELIST="$PCI_WHITELIST $NVME_WHITELIST"
|
|
|
|
fi
|
|
|
|
|
2018-01-23 14:57:26 +00:00
|
|
|
if [ -n "$SKIP_PCI" ]; then
|
|
|
|
PCI_WHITELIST="none"
|
|
|
|
fi
|
|
|
|
|
2018-01-15 19:08:37 +00:00
|
|
|
if [ -z "$TARGET_USER" ]; then
|
|
|
|
TARGET_USER="$SUDO_USER"
|
|
|
|
if [ -z "$TARGET_USER" ]; then
|
2020-05-07 11:27:06 +00:00
|
|
|
TARGET_USER=$(logname 2> /dev/null) || true
|
2018-01-15 19:08:37 +00:00
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
2020-08-21 10:15:13 +00:00
|
|
|
collect_devices "$mode"
|
2020-05-10 11:04:32 +00:00
|
|
|
if [[ $os == Linux ]]; then
|
2020-05-07 11:27:06 +00:00
|
|
|
HUGEPGSZ=$(($(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9')))
|
|
|
|
HUGEPGSZ_MB=$((HUGEPGSZ / 1024))
|
|
|
|
: ${NRHUGE=$(((HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB))}
|
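Adding HUGEPGSZ_MB - 1 before the integer division rounds up, so NRHUGE
covers at least HUGEMEM megabytes even when the hugepage size does not divide
it evenly (note also that NRHUGE uses the = form rather than :=, so an
explicitly empty NRHUGE is left untouched). Two worked cases for HUGEMEM=2048:

    # 2 MB hugepages (Hugepagesize: 2048 kB -> HUGEPGSZ_MB=2):
    echo $(((2048 + 2 - 1) / 2))         # 1024 pages
    # 1 GB hugepages (Hugepagesize: 1048576 kB -> HUGEPGSZ_MB=1024):
    echo $(((2048 + 1024 - 1) / 1024))   # 2 pages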
2017-08-30 18:20:22 +00:00
|
|
|
|
2016-02-19 21:11:08 +00:00
|
|
|
if [ "$mode" == "config" ]; then
|
|
|
|
configure_linux
|
2018-07-17 19:43:33 +00:00
|
|
|
elif [ "$mode" == "cleanup" ]; then
|
|
|
|
cleanup_linux
|
2016-02-19 21:11:08 +00:00
|
|
|
elif [ "$mode" == "reset" ]; then
|
|
|
|
reset_linux
|
2017-01-30 13:38:44 +00:00
|
|
|
elif [ "$mode" == "status" ]; then
|
|
|
|
status_linux
|
2018-01-15 18:49:30 +00:00
|
|
|
elif [ "$mode" == "help" ]; then
|
|
|
|
usage $0
|
|
|
|
else
|
|
|
|
usage $0 "Invalid argument '$mode'"
|
2016-02-19 21:11:08 +00:00
|
|
|
fi
|
|
|
|
else
|
|
|
|
if [ "$mode" == "config" ]; then
|
|
|
|
configure_freebsd
|
|
|
|
elif [ "$mode" == "reset" ]; then
|
|
|
|
reset_freebsd
|
2018-09-24 08:11:47 +00:00
|
|
|
elif [ "$mode" == "cleanup" ]; then
|
2020-05-10 11:04:32 +00:00
|
|
|
echo "setup.sh cleanup function not yet supported on $os"
|
2018-09-24 08:11:47 +00:00
|
|
|
elif [ "$mode" == "status" ]; then
|
2020-05-06 21:28:52 +00:00
|
|
|
status_freebsd
|
2018-01-15 18:49:30 +00:00
|
|
|
elif [ "$mode" == "help" ]; then
|
|
|
|
usage $0
|
|
|
|
else
|
|
|
|
usage $0 "Invalid argument '$mode'"
|
2016-02-19 21:11:08 +00:00
|
|
|
fi
|
|
|
|
fi
|