diff --git a/scripts/perf/nvmf/common.py b/scripts/perf/nvmf/common.py
index c32a8d181..8c0d435f3 100644
--- a/scripts/perf/nvmf/common.py
+++ b/scripts/perf/nvmf/common.py
@@ -21,7 +21,9 @@ def get_nvme_devices_count():
 
 def get_nvme_devices_bdf():
     print("Getting BDFs for NVMe section")
-    output = check_output("source scripts/common.sh; iter_pci_class_code 01 08 02",
+    output = check_output("rootdir=$PWD; \
+                          source test/common/autotest_common.sh; \
+                          get_nvme_bdfs 01 08 02",
                           executable="/bin/bash", shell=True)
     output = [str(x, encoding="utf-8") for x in output.split()]
     print("Done getting BDFs")
diff --git a/test/common/autotest_common.sh b/test/common/autotest_common.sh
index cad88edfb..f2ecee1f8 100644
--- a/test/common/autotest_common.sh
+++ b/test/common/autotest_common.sh
@@ -1126,6 +1126,19 @@ function opal_revert_cleanup {
     killprocess $spdk_tgt_pid
 }
 
+# Get BDF addresses of all NVMe drives currently attached to
+# uio-pci-generic or vfio-pci
+function get_nvme_bdfs() {
+    xtrace_disable
+    jq -r .config[].params.traddr <<< $(scripts/gen_nvme.sh --json)
+    xtrace_restore
+}
+
+# Same as function above, but just get the first disks BDF address
+function get_first_nvme_bdf() {
+    head -1 <<< $(get_nvme_bdfs)
+}
+
 set -o errtrace
 shopt -s extdebug
 trap "trap - ERR; print_backtrace >&2" ERR
diff --git a/test/iscsi_tgt/filesystem/filesystem.sh b/test/iscsi_tgt/filesystem/filesystem.sh
index 73173009a..06a2002e7 100755
--- a/test/iscsi_tgt/filesystem/filesystem.sh
+++ b/test/iscsi_tgt/filesystem/filesystem.sh
@@ -40,7 +40,7 @@ echo "iscsi_tgt is listening. Running tests..."
 
 timing_exit start_iscsi_tgt
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 $rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
 $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
 $rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf
diff --git a/test/nvme/nvme.sh b/test/nvme/nvme.sh
index 7ea981b61..759480b7c 100755
--- a/test/nvme/nvme.sh
+++ b/test/nvme/nvme.sh
@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 
 function nvme_identify {
     $rootdir/examples/nvme/identify/identify -i 0
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0
     done
     timing_exit identify
@@ -25,7 +25,7 @@ function nvme_perf {
 
 function nvme_fio_test {
     PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         for blkname in $(get_nvme_name_from_bdf $bdf); do
             fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=PCIe traddr=${bdf//:/.} ns=${blkname##*n}"
         done
@@ -55,7 +55,7 @@ if [ $(uname) = Linux ]; then
     #
     # note: more work probably needs to be done to properly handle devices with multiple
     # namespaces
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         for name in $(get_nvme_name_from_bdf $bdf); do
             if [ "$name" != "" ]; then
                 mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)
diff --git a/test/nvme/nvme_rpc.sh b/test/nvme/nvme_rpc.sh
index 0c3688ec5..57f6da6f7 100755
--- a/test/nvme/nvme_rpc.sh
+++ b/test/nvme/nvme_rpc.sh
@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 $rootdir/app/spdk_tgt/spdk_tgt -m 0x3 &
 spdk_tgt_pid=$!
diff --git a/test/nvme/perf/common.sh b/test/nvme/perf/common.sh
index 6ab62a252..c78a5255e 100755
--- a/test/nvme/perf/common.sh
+++ b/test/nvme/perf/common.sh
@@ -80,7 +80,7 @@ function get_numa_node(){
         done
     else
         # Only target not mounted NVMes
-        for bdf in $(iter_pci_class_code 01 08 02); do
+        for bdf in $(get_nvme_bdfs); do
             if is_bdf_not_mounted $bdf; then
                 cat /sys/bus/pci/devices/$bdf/numa_node
             fi
@@ -91,11 +91,8 @@ function get_numa_node(){
 function get_disks(){
     local plugin=$1
     if [[ "$plugin" =~ "nvme" ]]; then
-        for bdf in $(iter_pci_class_code 01 08 02); do
-            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
-            if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
-                echo "$bdf"
-            fi
+        for bdf in $(get_nvme_bdfs); do
+            echo "$bdf"
         done
     elif [[ "$plugin" =~ "bdev" ]]; then
         local bdevs
@@ -103,7 +100,7 @@ function get_disks(){
         jq -r '.[].name' <<< $bdevs
     else
         # Only target not mounted NVMes
-        for bdf in $(iter_pci_class_code 01 08 02); do
+        for bdf in $(get_nvme_bdfs); do
             if is_bdf_not_mounted $bdf; then
                 local blkname
                 blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')
diff --git a/test/nvme/spdk_nvme_cli.sh b/test/nvme/spdk_nvme_cli.sh
index c0d7392be..cc2ae50a4 100755
--- a/test/nvme/spdk_nvme_cli.sh
+++ b/test/nvme/spdk_nvme_cli.sh
@@ -30,7 +30,7 @@ cd $spdk_nvme_cli
 make clean && make -j$(nproc) LDFLAGS="$(make -s -C $spdk_nvme_cli/spdk ldflags)"
 sed -i 's/spdk=0/spdk=1/g' spdk.conf
 sed -i 's/shm_id=.*/shm_id=0/g' spdk.conf
-for bdf in $(iter_pci_class_code 01 08 02); do
+for bdf in $(get_nvme_bdfs); do
     ./nvme list
     ./nvme id-ctrl $bdf
     ./nvme list-ctrl $bdf
diff --git a/test/nvme/spdk_nvme_cli_cuse.sh b/test/nvme/spdk_nvme_cli_cuse.sh
index 1845ec6a6..18734aad7 100755
--- a/test/nvme/spdk_nvme_cli_cuse.sh
+++ b/test/nvme/spdk_nvme_cli_cuse.sh
@@ -14,7 +14,7 @@ CUSE_OUT=$testdir/match_files/cuse.out
 NVME_CMD=/usr/local/src/nvme-cli/nvme
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
 sleep 1
diff --git a/test/nvme/spdk_smartctl_cuse.sh b/test/nvme/spdk_smartctl_cuse.sh
index 4348f7b10..9de248f38 100755
--- a/test/nvme/spdk_smartctl_cuse.sh
+++ b/test/nvme/spdk_smartctl_cuse.sh
@@ -8,7 +8,7 @@ source $rootdir/test/common/autotest_common.sh
 SMARTCTL_CMD='smartctl -d nvme'
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
 sleep 1
diff --git a/test/nvmf/host/fio.sh b/test/nvmf/host/fio.sh
index 3925d646a..85f9a00f1 100755
--- a/test/nvmf/host/fio.sh
+++ b/test/nvmf/host/fio.sh
@@ -44,7 +44,7 @@ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
     # Test fio_plugin as host with nvme lvol backend
-    bdfs=$(iter_pci_class_code 01 08 02)
+    bdfs=$(get_nvme_bdfs)
     $rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
     ls_guid=$($rpc_py bdev_lvol_create_lvstore -c 1073741824 Nvme0n1 lvs_0)
     get_lvs_free_mb $ls_guid
diff --git a/test/nvmf/target/identify_passthru.sh b/test/nvmf/target/identify_passthru.sh
index e102c749c..ac8d7ee42 100755
--- a/test/nvmf/target/identify_passthru.sh
+++ b/test/nvmf/target/identify_passthru.sh
@@ -12,7 +12,7 @@ nvmftestinit
 
 timing_enter nvme_identify
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 if [ -z "${bdf}" ] ; then
     echo "No NVMe drive found but test requires it. Failing the test."
     exit 1
diff --git a/test/vhost/lvol/lvol_test.sh b/test/vhost/lvol/lvol_test.sh
index 6e69a2044..060fdef03 100755
--- a/test/vhost/lvol/lvol_test.sh
+++ b/test/vhost/lvol/lvol_test.sh
@@ -100,7 +100,7 @@ done
 vhosttestinit
 
 notice "Get NVMe disks:"
-nvmes=($(iter_pci_class_code 01 08 02))
+nvmes=($(get_nvme_bdfs))
 
 if [[ -z $max_disks ]]; then
     max_disks=${#nvmes[@]}
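
For reference, the two helpers added to test/common/autotest_common.sh are consumed the same way by every converted test script above. A minimal, illustrative sketch follows; it is not part of the patch, and it assumes the script is run from the SPDK repository root so that test/common/autotest_common.sh, scripts/gen_nvme.sh and jq are all available.

#!/usr/bin/env bash
# Illustrative usage of get_nvme_bdfs() / get_first_nvme_bdf() (assumed to be
# run from the SPDK repository root; the echo output is an example only).
rootdir=$(pwd)
source "$rootdir/test/common/autotest_common.sh"

# Enumerate every NVMe controller BDF reported by scripts/gen_nvme.sh --json.
for bdf in $(get_nvme_bdfs); do
    echo "Found NVMe controller at $bdf"
done

# Or, as the single-disk tests above do, take only the first controller.
bdf=$(get_first_nvme_bdf)
echo "Using $bdf"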