spdk/test/ftl/restore.sh
Konrad Sztyber 83af29b4eb test/ftl: wait till nbd is ready when loading JSON config
When creating the nbd device, we wait until it's ready, but we didn't do
that when loading the configuration from JSON, which resulted in
sporadic I/O failures, as the device hadn't been initialized yet. This
patch adds a waitfornbd call after each load_config call.
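
A minimal sketch of the pattern (waitfornbd is the helper sourced from
test/common/autotest_common.sh, which blocks until the given nbd device
is usable):

    $rpc_py load_config < $testdir/config/ftl.json
    waitfornbd nbd0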

Change-Id: Id350ae7b1afab11f5f3fbd131d938dbd65a8cb15
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459616
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Mateusz Kozlowski <mateusz.kozlowski@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
2019-07-12 12:39:38 +00:00

#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
mount_dir=$(mktemp -d)
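# First positional argument is the device address to create the FTL bdev on;
# the optional second argument is the UUID used to restore an existing instance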
device=$1
uuid=$2
restore_kill() {
	if mount | grep -q "$mount_dir"; then
		umount "$mount_dir"
	fi
	rm -rf "$mount_dir"
	rm -f "$testdir/testfile.md5"
	rm -f "$testdir/testfile2.md5"
	rm -f "$testdir/config/ftl.json"
	$rpc_py delete_ftl_bdev -b nvme0
	killprocess $svcpid
	rmmod nbd || true
}
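# Make sure we clean up on interrupt, termination, or premature exit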
trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
$rootdir/test/app/bdev_svc/bdev_svc & svcpid=$!
# Wait until bdev_svc starts
waitforlisten $svcpid
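# Create the FTL bdev on parallel unit range 0-3; when a UUID is given,
# restore the existing instance instead of starting from scratch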
if [ -n "$uuid" ]; then
	$rpc_py construct_ftl_bdev -b nvme0 -a $device -l 0-3 -u $uuid
else
	$rpc_py construct_ftl_bdev -b nvme0 -a $device -l 0-3
fi
# Load the nbd driver
modprobe nbd
$rpc_py start_nbd_disk nvme0 /dev/nbd0
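# Make sure the newly created nbd device is ready before using it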
waitfornbd nbd0
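# Save the configuration, so it can be reloaded after the bdev service restarts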
$rpc_py save_config > $testdir/config/ftl.json
# Prepare the disk by creating ext4 fs and putting a file on it
mkfs.ext4 -F /dev/nbd0
mount /dev/nbd0 $mount_dir
dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=256K
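# Flush the data to the device, so the checksum reflects what's actually stored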
sync
mount -o remount /dev/nbd0 $mount_dir
md5sum $mount_dir/testfile > $testdir/testfile.md5
# Kill bdev service and start it again
umount $mount_dir
killprocess $svcpid
$rootdir/test/app/bdev_svc/bdev_svc & svcpid=$!
# Wait until bdev_svc starts
waitforlisten $svcpid
$rpc_py load_config < $testdir/config/ftl.json
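# The nbd disk is restarted as part of the config load; wait until it's ready
# again before mounting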
waitfornbd nbd0
mount /dev/nbd0 $mount_dir
# Write second file, to make sure writer thread has restored properly
dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=256K
md5sum $mount_dir/testfile2 > $testdir/testfile2.md5
# Make sure second file will be read from disk
echo 3 > /proc/sys/vm/drop_caches
# Check both files have proper data
md5sum -c $testdir/testfile.md5
md5sum -c $testdir/testfile2.md5
report_test_completion occsd_restore
trap - SIGINT SIGTERM EXIT
restore_kill