ftl: Dirty shutdown tests
Add additional tests checking dirty shutdown recovery. Also add a
'write after write' test, which checks that two simultaneous writes to
the same LBAs return the same data before and after a dirty shutdown.

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: Idcf9b51d9c00d0d065f7e9655387668f5eeb646d
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13376
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 42899f1a74
commit ea80f87e0e
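Both new scripts follow the same recovery pattern; a minimal hedged sketch (the paths, the nbd export, and the svcpid/rpc_py variables stand in for the real setup done in the scripts below):

    # write data through the exported nbd device and record its checksum
    dd if=/dev/urandom of=testfile bs=4K count=$data_size
    md5sum testfile > testfile.md5
    dd if=testfile of=/dev/nbd0 bs=4K count=$data_size oflag=dsync

    # dirty shutdown: kill the target before FTL can shut down cleanly
    kill -9 $svcpid

    # restart, reload the saved bdev configuration, verify the data survived
    "$SPDK_BIN_DIR/spdk_tgt" &
    svcpid=$!
    waitforlisten $svcpid
    $rpc_py load_config < ftl.json
    $rpc_py nbd_start_disk ftl0 /dev/nbd0
    dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c testfile.md5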
@@ -3,6 +3,7 @@
 # cases
 ftl_dirty_shutdown
 ftl_restore_fast
+ftl_write_after_write
 ftl_fio_basic
 ftl_fio_extended
 ftl_fio_nightly
test/ftl/config/fio/write_after_write.fio (new file, 20 lines)
@@ -0,0 +1,20 @@
+[global]
+ioengine=spdk_bdev
+spdk_json_conf=${FTL_JSON_CONF}
+filename=${FTL_BDEV_NAME}
+thread=1
+direct=1
+random_distribution=normal
+io_size=5G
+
+[job1]
+iodepth=512
+rw=write
+bs=32k
+buffer_pattern="abcdef"
+
+[job2]
+iodepth=512
+rw=write
+bs=64k
+buffer_pattern="qwert"
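The two jobs write to the same bdev simultaneously with different block sizes and buffer patterns; the test then checks that the data reads back consistently before and after a dirty shutdown. The config takes its bdev name and JSON config from the environment, so a caller has to export both before invoking fio, as write_after_write.sh below does:

    export FTL_BDEV_NAME=ftl0
    export FTL_JSON_CONF=$testdir/config/ftl.json
    fio_bdev $testdir/config/fio/write_after_write.fio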
test/ftl/dirty_shutdown.sh (new executable file, 92 lines)
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+while getopts ':u:c:' opt; do
+	case $opt in
+		u) uuid=$OPTARG ;;
+		c) nv_cache=$OPTARG ;;
+		?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] BASE_PCI_BDF" && exit 1 ;;
+	esac
+done
+shift $((OPTIND - 1))
+
+device=$1
+timeout=240
+
+chunk_size=262144
+data_size=$((chunk_size * 2))
+
+restore_kill() {
+	rm -f $testdir/config/ftl.json
+	rm -f $testdir/testfile
+	rm -f $testdir/testfile2
+	rm -f $testdir/testfile.md5
+	rm -f $testdir/testfile2.md5
+
+	killprocess $svcpid || true
+	rmmod nbd || true
+	remove_shm
+}
+
+trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" &
+svcpid=$!
+# Wait until spdk_tgt starts
+waitforlisten $svcpid
+
+split_bdev=$(create_base_bdev nvme0 $device $((1024 * 101)))
+
+if [ -n "$nv_cache" ]; then
+	nvc_bdev=$(create_nv_cache_bdev nvc0 $nv_cache $split_bdev)
+fi
+
+l2p_dram_size_mb=$(($(get_bdev_size $split_bdev) * 10 / 100 / 1024))
+ftl_construct_args="bdev_ftl_create -b ftl0 -d $split_bdev --l2p_dram_limit $l2p_dram_size_mb"
+
+[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
+[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
+
+$rpc_py -t $timeout $ftl_construct_args
+
+(
+	echo '{"subsystems": ['
+	$rpc_py save_subsystem_config -n bdev
+	echo ']}'
+) > $testdir/config/ftl.json
+
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+# Write and calculate checksum of the data written
+dd if=/dev/urandom of=$testdir/testfile bs=4K count=$data_size
+md5sum $testdir/testfile > $testdir/testfile.md5
+dd if=$testdir/testfile of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
+$rpc_py nbd_stop_disk /dev/nbd0
+
+# Force kill bdev service (dirty shutdown) and start it again
+kill -9 $svcpid
+rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
+
+# Write extra data after restore
+dd if=/dev/urandom of=$testdir/testfile2 bs=4K count=$chunk_size
+"$SPDK_BIN_DIR/spdk_dd" --if=$testdir/testfile2 --ob=ftl0 --count=$chunk_size --seek=$data_size --json=$testdir/config/ftl.json
+# Save md5 data
+md5sum $testdir/testfile2 > $testdir/testfile2.md5
+
+# Verify that the checksum matches and the data is consistent
+"$SPDK_BIN_DIR/spdk_dd" --ib=ftl0 --of=$testdir/testfile --count=$data_size --json=$testdir/config/ftl.json
+md5sum -c $testdir/testfile.md5
+"$SPDK_BIN_DIR/spdk_dd" --ib=ftl0 --of=$testdir/testfile2 --count=$chunk_size --skip=$data_size --json=$testdir/config/ftl.json
+md5sum -c $testdir/testfile2.md5
+
+trap - SIGINT SIGTERM EXIT
+restore_kill
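A usage sketch for the script above, with hypothetical PCI addresses in place of real devices (the base device BDF is positional; -c optionally attaches an NV cache device, -u optionally reuses an existing FTL UUID):

    ./test/ftl/dirty_shutdown.sh -c 0000:86:00.0 0000:85:00.0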
@@ -71,10 +71,13 @@ if [[ -z $SPDK_TEST_FTL_NIGHTLY ]]; then
 	run_test "ftl_fio_basic" $testdir/fio.sh $device $nv_cache basic
 	run_test "ftl_bdevperf" $testdir/bdevperf.sh $device $nv_cache
 	run_test "ftl_restore" $testdir/restore.sh -c $nv_cache $device
+	run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
 fi
 
 if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
 	run_test "ftl_restore_fast" $testdir/restore.sh -f -c $nv_cache $device
+	run_test "ftl_dirty_shutdown" $testdir/dirty_shutdown.sh -c $nv_cache $device
+	run_test "ftl_write_after_write" $testdir/write_after_write.sh $device $nv_cache
 	run_test "ftl_fio_extended" $testdir/fio.sh $device $nv_cache extended
 fi
 
test/ftl/write_after_write.sh (new executable file, 88 lines)
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+testdir=$(readlink -f $(dirname $0))
+rootdir=$(readlink -f $testdir/../..)
+source $rootdir/test/common/autotest_common.sh
+source $testdir/common.sh
+
+rpc_py=$rootdir/scripts/rpc.py
+
+fio_kill() {
+	rm -f $testdir/testfile.md5
+	rm -f $testdir/config/ftl.json
+
+	killprocess $svcpid
+	rmmod nbd || true
+}
+
+device=$1
+cache_device=$2
+timeout=240
+data_size=$((262144 * 5))
+
+if [[ $CONFIG_FIO_PLUGIN != y ]]; then
+	echo "FIO not available"
+	exit 1
+fi
+
+export FTL_BDEV_NAME=ftl0
+export FTL_JSON_CONF=$testdir/config/ftl.json
+
+trap "fio_kill; exit 1" SIGINT SIGTERM EXIT
+
+"$SPDK_BIN_DIR/spdk_tgt" &
+svcpid=$!
+waitforlisten $svcpid
+
+split_bdev=$(create_base_bdev nvme0 $device $((1024 * 101)))
+nv_cache=$(create_nv_cache_bdev nvc0 $cache_device $split_bdev)
+
+l2p_percentage=60
+l2p_dram_size_mb=$(($(get_bdev_size $split_bdev) * l2p_percentage / 100 / 1024))
+
+$rpc_py -t $timeout bdev_ftl_create -b ftl0 -d $split_bdev -c $nv_cache --l2p_dram_limit $l2p_dram_size_mb
+
+waitforbdev ftl0
+
+(
+	echo '{"subsystems": ['
+	$rpc_py save_subsystem_config -n bdev
+	echo ']}'
+) > $FTL_JSON_CONF
+
+killprocess $svcpid
+
+fio_bdev $testdir/config/fio/write_after_write.fio
+
+"$SPDK_BIN_DIR/spdk_tgt" -L ftl_init &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py load_config < $FTL_JSON_CONF
+# Load the nbd driver
+modprobe nbd
+$rpc_py nbd_start_disk ftl0 /dev/nbd0
+waitfornbd nbd0
+
+$rpc_py save_config > $testdir/config/ftl.json
+
+# Calculate checksum of the data written
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
+$rpc_py nbd_stop_disk /dev/nbd0
+
+# Force kill bdev service (dirty shutdown) and start it again
+kill -9 $svcpid
+rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
+
+"$SPDK_BIN_DIR/spdk_tgt" -L ftl_init &
+svcpid=$!
+waitforlisten $svcpid
+
+$rpc_py load_config < $testdir/config/ftl.json
+waitfornbd nbd0
+
+# Verify that the checksum matches and the data is consistent
+dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c $testdir/testfile.md5
+
+trap - SIGINT SIGTERM EXIT
+fio_kill
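One detail worth noting in the script above: the first md5sum reads the dd pipe, so testfile.md5 records the hash of standard input under the filename '-'; the later 'md5sum -c' therefore re-checks whatever is piped into it rather than a file on disk. The same trick in isolation:

    echo hello | md5sum > sum.md5     # sum.md5 contains '<hash>  -'
    echo hello | md5sum -c sum.md5    # hashes stdin again, prints '-: OK'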