scripts/setup remove flakiness from tests

This removes all the sleeps called after setup.sh reset. These
sleeps were meant to wait for devices that the given tests depend on.

Change-Id: Ibb86c9f6f8d5f1b05d165e84d9019530af84f9ea
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4035
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Michal Berger 2020-09-02 17:32:24 +02:00 committed by Tomasz Zawadzki
parent 6182bff272
commit b80667550f
8 changed files with 0 additions and 16 deletions

View File

@ -83,9 +83,6 @@ rm -f /var/tmp/spdk*.sock
# Load the kernel driver
./scripts/setup.sh reset
# Let the kernel discover any filesystems or partitions
sleep 10
if [ $(uname -s) = Linux ]; then
# OCSSD devices drivers don't support IO issues by kernel so
# detect OCSSD devices and blacklist them (unbind from any driver).

View File

@ -55,11 +55,6 @@ function setup_nvme_conf() {
function setup_gpt_conf() {
if [[ $(uname -s) = Linux ]] && hash sgdisk; then
$rootdir/scripts/setup.sh reset
# FIXME: Note that we are racing with the kernel here. There's no guarantee that
# proper object will be already in place under sysfs nor that any udev-like
# helper created proper block devices for us. Replace the below sleep with proper
# udev settle routine.
sleep 1s
# Get nvme devices by following drivers' links towards nvme class
local nvme_devs=(/sys/bus/pci/drivers/nvme/*/nvme/nvme*/nvme*n*) nvme_dev
gpt_nvme=""
@ -398,7 +393,6 @@ fi
#-----------------------------------------------------
if [ "$test_type" = "gpt" ]; then
"$rootdir/scripts/setup.sh" reset
sleep 1s
if [[ -b $gpt_nvme ]]; then
dd if=/dev/zero of="$gpt_nvme" bs=4096 count=8 oflag=direct
fi

View File

@ -1267,7 +1267,6 @@ function nvme_namespace_revert() {
bdfs=$(get_nvme_bdfs)
$rootdir/scripts/setup.sh reset
sleep 1
for bdf in $bdfs; do
nvme_ctrlr=/dev/$(get_nvme_ctrlr_from_bdf ${bdf})

View File

@ -15,7 +15,6 @@ sleep 1
bdfs=$(get_nvme_bdfs)
$rootdir/scripts/setup.sh reset
sleep 1
# Find bdf that supports Namespace Managment
for bdf in $bdfs; do

View File

@ -17,7 +17,6 @@ rpc_py=$rootdir/scripts/rpc.py
bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1
nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
if [[ -z "$nvme_name" ]]; then
echo "setup.sh failed bind kernel driver to ${bdf}"

View File

@ -11,7 +11,6 @@ rpc_py=$rootdir/scripts/rpc.py
bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1
nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
if [[ -z "$nvme_name" ]]; then
echo "setup.sh failed bind kernel driver to ${bdf}"

View File

@ -50,8 +50,6 @@ if [ $(uname) = Linux ]; then
# check that our setup.sh script does not bind NVMe devices to uio/vfio if they
# have an active mountpoint
$rootdir/scripts/setup.sh reset
# give kernel nvme driver some time to create the block devices before we start looking for them
sleep 1
blkname=''
# first, find an NVMe device that does not have an active mountpoint already;
# this covers rare case where someone is running this test script on a system

View File

@ -15,7 +15,6 @@ function clear_nvme() {
# Clear metadata on NVMe device
$rootdir/scripts/setup.sh reset
sleep 5
name=$(get_nvme_name_from_bdf "${bdf[0]}")
mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)