test: do not use iter_pci_class_code in tests

Do not use the iter_pci_class_code function in tests to
iterate over NVMe drives. This function can return
drives which, at the moment of execution, cannot be
whitelisted for use.

This can result in test errors (such as the
bdev_nvme_attach_controller RPC command simply failing)
or even in using and deleting data from an NVMe drive
that was not meant to be used in tests.
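
A drive can show up by PCI class code yet still be one that
setup.sh skipped. A minimal sketch of the difference, assuming
scripts/common.sh and test/common/autotest_common.sh are
sourced (the echoed labels are illustrative only):

    # iter_pci_class_code enumerates every PCI function of class
    # 01 08 02 (mass storage / NVM / NVMe), including drives that
    # setup.sh skipped, e.g. ones holding a mounted filesystem or
    # excluded from PCI_WHITELIST:
    for bdf in $(iter_pci_class_code 01 08 02); do
        echo "NVMe-class device: $bdf"
    done

    # The whitelist-aware helper introduced below reports only
    # drives that SPDK's config generator would actually attach:
    for bdf in $(get_nvme_bdfs); do
        echo "usable in tests: $bdf"
    done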

Fixes #1235

Change-Id: I82b9935fc88605b636c2096be6c71d4880a567c8
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1309
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Karol Latecki <karol.latecki@intel.com>
Date: 2020-03-16 17:32:39 +01:00
Committed-by: Tomasz Zawadzki
Parent: bad71510cb
Commit: db43b387ba

12 changed files with 31 additions and 19 deletions


@@ -21,7 +21,9 @@ def get_nvme_devices_count():
 def get_nvme_devices_bdf():
     print("Getting BDFs for NVMe section")
-    output = check_output("source scripts/common.sh; iter_pci_class_code 01 08 02",
+    output = check_output("rootdir=$PWD; \
+                          source test/common/autotest_common.sh; \
+                          get_nvme_bdfs 01 08 02",
                           executable="/bin/bash", shell=True)
     output = [str(x, encoding="utf-8") for x in output.split()]
     print("Done getting BDFs")


@@ -1126,6 +1126,19 @@ function opal_revert_cleanup {
     killprocess $spdk_tgt_pid
 }
 
+# Get BDF addresses of all NVMe drives currently attached to
+# uio-pci-generic or vfio-pci
+function get_nvme_bdfs() {
+    xtrace_disable
+    jq -r .config[].params.traddr <<< $(scripts/gen_nvme.sh --json)
+    xtrace_restore
+}
+
+# Same as the function above, but get just the first disk's BDF address
+function get_first_nvme_bdf() {
+    head -1 <<< $(get_nvme_bdfs)
+}
+
 set -o errtrace
 shopt -s extdebug
 trap "trap - ERR; print_backtrace >&2" ERR


@@ -40,7 +40,7 @@ echo "iscsi_tgt is listening. Running tests..."
 timing_exit start_iscsi_tgt
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 $rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
 $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
 $rpc_py bdev_nvme_attach_controller -b "Nvme0" -t "pcie" -a $bdf


@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 
 function nvme_identify {
     $rootdir/examples/nvme/identify/identify -i 0
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0
     done
     timing_exit identify
@@ -25,7 +25,7 @@ function nvme_perf {
 
 function nvme_fio_test {
     PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         for blkname in $(get_nvme_name_from_bdf $bdf); do
             fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=PCIe traddr=${bdf//:/.} ns=${blkname##*n}"
         done
@@ -55,7 +55,7 @@ if [ $(uname) = Linux ]; then
     #
     # note: more work probably needs to be done to properly handle devices with multiple
     # namespaces
-    for bdf in $(iter_pci_class_code 01 08 02); do
+    for bdf in $(get_nvme_bdfs); do
         for name in $(get_nvme_name_from_bdf $bdf); do
             if [ "$name" != "" ]; then
                 mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)


@@ -7,7 +7,7 @@ source $rootdir/test/common/autotest_common.sh
 
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 $rootdir/app/spdk_tgt/spdk_tgt -m 0x3 &
 spdk_tgt_pid=$!


@@ -80,7 +80,7 @@ function get_numa_node(){
         done
     else
         # Only target not mounted NVMes
-        for bdf in $(iter_pci_class_code 01 08 02); do
+        for bdf in $(get_nvme_bdfs); do
             if is_bdf_not_mounted $bdf; then
                 cat /sys/bus/pci/devices/$bdf/numa_node
             fi
@@ -91,11 +91,8 @@ function get_numa_node(){
 function get_disks(){
     local plugin=$1
     if [[ "$plugin" =~ "nvme" ]]; then
-        for bdf in $(iter_pci_class_code 01 08 02); do
-            driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
-            if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
-                echo "$bdf"
-            fi
+        for bdf in $(get_nvme_bdfs); do
+            echo "$bdf"
         done
     elif [[ "$plugin" =~ "bdev" ]]; then
         local bdevs
@@ -103,7 +100,7 @@ function get_disks(){
         jq -r '.[].name' <<< $bdevs
     else
         # Only target not mounted NVMes
-        for bdf in $(iter_pci_class_code 01 08 02); do
+        for bdf in $(get_nvme_bdfs); do
             if is_bdf_not_mounted $bdf; then
                 local blkname
                 blkname=$(ls -l /sys/block/ | grep $bdf | awk '{print $9}')


@@ -30,7 +30,7 @@ cd $spdk_nvme_cli
 make clean && make -j$(nproc) LDFLAGS="$(make -s -C $spdk_nvme_cli/spdk ldflags)"
 sed -i 's/spdk=0/spdk=1/g' spdk.conf
 sed -i 's/shm_id=.*/shm_id=0/g' spdk.conf
-for bdf in $(iter_pci_class_code 01 08 02); do
+for bdf in $(get_nvme_bdfs); do
     ./nvme list
     ./nvme id-ctrl $bdf
     ./nvme list-ctrl $bdf


@@ -14,7 +14,7 @@ CUSE_OUT=$testdir/match_files/cuse.out
 NVME_CMD=/usr/local/src/nvme-cli/nvme
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
 sleep 1


@@ -8,7 +8,7 @@ source $rootdir/test/common/autotest_common.sh
 SMARTCTL_CMD='smartctl -d nvme'
 rpc_py=$rootdir/scripts/rpc.py
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 
 PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
 sleep 1


@@ -44,7 +44,7 @@ $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
     # Test fio_plugin as host with nvme lvol backend
-    bdfs=$(iter_pci_class_code 01 08 02)
+    bdfs=$(get_nvme_bdfs)
     $rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a $(echo $bdfs | awk '{ print $1 }') -i $NVMF_FIRST_TARGET_IP
     ls_guid=$($rpc_py bdev_lvol_create_lvstore -c 1073741824 Nvme0n1 lvs_0)
     get_lvs_free_mb $ls_guid


@@ -12,7 +12,7 @@ nvmftestinit
 
 timing_enter nvme_identify
 
-bdf=$(iter_pci_class_code 01 08 02 | head -1)
+bdf=$(get_first_nvme_bdf)
 if [ -z "${bdf}" ] ; then
     echo "No NVMe drive found but test requires it. Failing the test."
     exit 1


@@ -100,7 +100,7 @@ done
 vhosttestinit
 
 notice "Get NVMe disks:"
-nvmes=($(iter_pci_class_code 01 08 02))
+nvmes=($(get_nvme_bdfs))
 
 if [[ -z $max_disks ]]; then
     max_disks=${#nvmes[@]}