check_format: Reformat the Bash code in compliance with shfmt

Change-Id: I93e7b9d355870b0528a0ac3382fba1a10a558d45
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1718
Community-CI: Mellanox Build Bot
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Authored by Michal Berger on 2020-05-07 13:27:06 +02:00; committed by Jim Harris
parent 0c1d022b57
commit 844c8ec383
135 changed files with 3368 additions and 3397 deletions
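
For reference, here is a minimal before/after sketch of the conventions shfmt enforces in this change. The invocation shown is an assumption (check_format.sh may pin different options); -sr puts a space after redirect operators and -bn moves binary operators such as | and && to the start of continuation lines, both of which are visible throughout the diff below.

# Hypothetical invocation; not necessarily the exact flags used by check_format.sh:
#   shfmt -i 0 -bn -ci -sr -w scripts/ test/

# Before reformatting:
function foo {
    if ! hash jq 2>/dev/null; then
        return 1
    fi
    git ls-files '*.json' | \
        xargs -P$(( $(nproc)*2 )) -n1 jq -e . >/dev/null
}

# After reformatting: function parens added, space after redirects,
# pipe moved to the start of the continuation line, arithmetic respaced.
function foo() {
    if ! hash jq 2> /dev/null; then
        return 1
    fi
    git ls-files '*.json' \
        | xargs -P$(($(nproc) * 2)) -n1 jq -e . > /dev/null
}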

@ -35,7 +35,7 @@ $MAKE cc_version
$MAKE cxx_version
echo "** END ** Info for Hostname: $HOSTNAME"
function ocf_precompile {
function ocf_precompile() {
# We compile OCF sources ourselves
# They don't need to be checked with scanbuild and code coverage is not applicable
# So we precompile OCF now for further use as standalone static library
@ -48,7 +48,7 @@ function ocf_precompile {
./configure $config_params
}
function make_fail_cleanup {
function make_fail_cleanup() {
if [ -d $out/scan-build-tmp ]; then
scanoutput=$(ls -1 $out/scan-build-tmp/)
mv $out/scan-build-tmp/$scanoutput $out/scan-build
@ -58,7 +58,7 @@ function make_fail_cleanup {
false
}
function scanbuild_make {
function scanbuild_make() {
pass=true
$scanbuild $MAKE $MAKEFLAGS > $out/build_output.txt && rm -rf $out/scan-build-tmp || make_fail_cleanup
xtrace_disable
@ -92,7 +92,7 @@ function scanbuild_make {
$pass
}
function porcelain_check {
function porcelain_check() {
if [ $(git status --porcelain --ignore-submodules | wc -l) -ne 0 ]; then
echo "Generated files missing from .gitignore:"
git status --porcelain --ignore-submodules
@ -103,7 +103,7 @@ function porcelain_check {
# Check that header file dependencies are working correctly by
# capturing a binary's stat data before and after touching a
# header file and re-making.
function header_dependency_check {
function header_dependency_check() {
STAT1=$(stat app/spdk_tgt/spdk_tgt)
sleep 1
touch lib/nvme/nvme_internal.h
@ -116,7 +116,7 @@ function header_dependency_check {
fi
}
function test_make_uninstall {
function test_make_uninstall() {
# Create empty file to check if it is not deleted by target uninstall
touch "$SPDK_WORKSPACE/usr/lib/sample_xyz.a"
$MAKE $MAKEFLAGS uninstall DESTDIR="$SPDK_WORKSPACE" prefix=/usr
@ -127,14 +127,14 @@ function test_make_uninstall {
fi
}
function build_doc {
function build_doc() {
$MAKE -C "$rootdir"/doc --no-print-directory $MAKEFLAGS &> "$out"/doxygen.log
if [ -s "$out"/doxygen.log ]; then
cat "$out"/doxygen.log
echo "Doxygen errors found!"
exit 1
fi
if hash pdflatex 2>/dev/null; then
if hash pdflatex 2> /dev/null; then
$MAKE -C "$rootdir"/doc/output/latex --no-print-directory $MAKEFLAGS &>> "$out"/doxygen.log
fi
mkdir -p "$out"/doc
@ -149,7 +149,7 @@ function build_doc {
rm -rf "$rootdir"/doc/output
}
function autobuild_test_suite {
function autobuild_test_suite() {
run_test "autobuild_check_format" ./scripts/check_format.sh
run_test "autobuild_external_code" sudo -E $rootdir/test/external_code/test_make.sh $rootdir
if [ "$SPDK_TEST_OCF" -eq 1 ]; then

@ -82,11 +82,9 @@ if [ $(uname -s) = Linux ]; then
# If some OCSSD device is bound to other driver than nvme we won't be able to
# discover if it is OCSSD or not so load the kernel driver first.
while IFS= read -r -d '' dev
do
while IFS= read -r -d '' dev; do
# Send Open Channel 2.0 Geometry opcode "0xe2" - not supported by NVMe device.
if nvme admin-passthru $dev --namespace-id=1 --data-len=4096 --opcode=0xe2 --read >/dev/null; then
if nvme admin-passthru $dev --namespace-id=1 --data-len=4096 --opcode=0xe2 --read > /dev/null; then
bdf="$(basename $(readlink -e /sys/class/nvme/${dev#/dev/}/device))"
echo "INFO: blacklisting OCSSD device: $dev ($bdf)"
PCI_BLACKLIST+=" $bdf"

@ -91,7 +91,7 @@ monmaptool --create --clobber --add a ${mon_ip}:12046 --print ${base_dir}/monmap
sh -c "ulimit -c unlimited && exec ceph-mon --mkfs -c ${ceph_conf} -i a --monmap=${base_dir}/monmap --keyring=${base_dir}/keyring --mon-data=${mon_dir}"
if [ $update_config = true ] ;then
if [ $update_config = true ]; then
sed -i 's/mon addr = /mon addr = v2:/g' $ceph_conf
fi
@ -106,12 +106,12 @@ chmod a+r /etc/ceph/ceph.client.admin.keyring
ceph-run sh -c "ulimit -n 16384 && ulimit -c unlimited && exec ceph-mon -c ${ceph_conf} -i a --keyring=${base_dir}/keyring --pid-file=${base_dir}/pid/root@$(hostname).pid --mon-data=${mon_dir}" || true
# after ceph-mon creation, ceph -s should work.
if [ $update_config = true ] ;then
if [ $update_config = true ]; then
# start to get whole log.
ceph-conf --name mon.a --show-config-value log_file
# add fsid to ceph config file.
fsid=$(ceph -s | grep id |awk '{print $2}')
fsid=$(ceph -s | grep id | awk '{print $2}')
sed -i 's/perf = true/perf = true\n\tfsid = '$fsid' \n/g' $ceph_conf
# unify the filesystem with the old versions.

@ -6,16 +6,16 @@ cd $BASEDIR
# exit on errors
set -e
if ! hash nproc 2>/dev/null; then
if ! hash nproc 2> /dev/null; then
function nproc() {
function nproc() {
echo 8
}
}
fi
function version_lt() {
[ $( echo -e "$1\n$2" | sort -V | head -1 ) != "$1" ]
[ $(echo -e "$1\n$2" | sort -V | head -1) != "$1" ]
}
rc=0
@ -30,7 +30,7 @@ while read -r perm _res0 _res1 path; do
fname=$(basename -- "$path")
case ${fname##*.} in
c|h|cpp|cc|cxx|hh|hpp|md|html|js|json|svg|Doxyfile|yml|LICENSE|README|conf|in|Makefile|mk|gitignore|go|txt)
c | h | cpp | cc | cxx | hh | hpp | md | html | js | json | svg | Doxyfile | yml | LICENSE | README | conf | in | Makefile | mk | gitignore | go | txt)
# These file types should never be executable
if [ "$perm" -eq 100755 ]; then
echo "ERROR: $path is marked executable but is a code file."
@ -65,8 +65,7 @@ fi
if hash astyle; then
echo -n "Checking coding style..."
if [ "$(astyle -V)" \< "Artistic Style Version 3" ]
then
if [ "$(astyle -V)" \< "Artistic Style Version 3" ]; then
echo -n " Your astyle version is too old so skipping coding style checks. Please update astyle to at least 3.0.1 version..."
else
rm -f astyle.log
@ -75,9 +74,9 @@ if hash astyle; then
# as-is to enable ongoing work to synch with a generic upstream DPDK vhost library,
# rather than making diffs more complicated by a lot of changes to follow SPDK
# coding standards.
git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' | \
grep -v rte_vhost | grep -v cpp_headers | \
xargs -P$(nproc) -n10 astyle --options=.astylerc >> astyle.log
git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' \
| grep -v rte_vhost | grep -v cpp_headers \
| xargs -P$(nproc) -n10 astyle --options=.astylerc >> astyle.log
if grep -q "^Formatted" astyle.log; then
echo " errors detected"
git diff
@ -96,7 +95,7 @@ else
echo "You do not have astyle installed so your code style is not being checked!"
fi
GIT_VERSION=$( git --version | cut -d' ' -f3 )
GIT_VERSION=$(git --version | cut -d' ' -f3)
if version_lt "1.9.5" "${GIT_VERSION}"; then
# git <1.9.5 doesn't support pathspec magic exclude
@ -171,8 +170,8 @@ rm -f badcunit.log
echo -n "Checking blank lines at end of file..."
if ! git grep -I -l -e . -z './*' ':!*.patch' | \
xargs -0 -P$(nproc) -n1 scripts/eofnl > eofnl.log; then
if ! git grep -I -l -e . -z './*' ':!*.patch' \
| xargs -0 -P$(nproc) -n1 scripts/eofnl > eofnl.log; then
echo " Incorrect end-of-file formatting detected"
cat eofnl.log
rc=1
@ -203,9 +202,9 @@ else
fi
rm -f scripts/includes.log
if hash pycodestyle 2>/dev/null; then
if hash pycodestyle 2> /dev/null; then
PEP8=pycodestyle
elif hash pep8 2>/dev/null; then
elif hash pep8 2> /dev/null; then
PEP8=pep8
fi
@ -228,7 +227,7 @@ else
echo "You do not have pycodestyle or pep8 installed so your Python style is not being checked!"
fi
if hash shellcheck 2>/dev/null; then
if hash shellcheck 2> /dev/null; then
echo -n "Checking Bash style..."
shellcheck_v=$(shellcheck --version | grep -P "version: [0-9\.]+" | cut -d " " -f2)

@ -10,7 +10,7 @@ function pci_can_use() {
local i
# The '\ ' part is important
if [[ " $PCI_BLACKLIST " =~ \ $1\ ]] ; then
if [[ " $PCI_BLACKLIST " =~ \ $1\ ]]; then
return 1
fi
@ -20,7 +20,7 @@ function pci_can_use() {
fi
for i in $PCI_WHITELIST; do
if [ "$i" == "$1" ] ; then
if [ "$i" == "$1" ]; then
return 0
fi
done
@ -28,7 +28,7 @@ function pci_can_use() {
return 1
}
cache_pci_init () {
cache_pci_init() {
local -gA pci_bus_cache
[[ -z ${pci_bus_cache[*]} || $CMD == reset ]] || return 1
@ -36,22 +36,22 @@ cache_pci_init () {
pci_bus_cache=()
}
cache_pci () {
cache_pci() {
local pci=$1 class=$2 vendor=$3 device=$4
if [[ -n $class ]]; then
class=0x${class/0x}
class=0x${class/0x/}
pci_bus_cache["$class"]="${pci_bus_cache["$class"]:+${pci_bus_cache["$class"]} }$pci"
fi
if [[ -n $vendor && -n $device ]]; then
vendor=0x${vendor/0x} device=0x${device/0x}
vendor=0x${vendor/0x/} device=0x${device/0x/}
pci_bus_cache["$vendor"]="${pci_bus_cache["$vendor"]:+${pci_bus_cache["$vendor"]} }$pci"
pci_bus_cache["$device"]="${pci_bus_cache["$device"]:+${pci_bus_cache["$device"]} }$pci"
pci_bus_cache["$vendor:$device"]="${pci_bus_cache["$vendor:$device"]:+${pci_bus_cache["$vendor:$device"]} }$pci"
fi
}
cache_pci_bus_sysfs () {
cache_pci_bus_sysfs() {
[[ -e /sys/bus/pci/devices ]] || return 1
cache_pci_init || return 0
@ -60,13 +60,13 @@ cache_pci_bus_sysfs () {
local class vendor device
for pci in /sys/bus/pci/devices/*; do
class=$(<"$pci/class") vendor=$(<"$pci/vendor") device=$(<"$pci/device")
class=$(< "$pci/class") vendor=$(< "$pci/vendor") device=$(< "$pci/device")
cache_pci "${pci##*/}" "$class" "$vendor" "$device"
done
}
cache_pci_bus_lspci () {
hash lspci 2>/dev/null || return 1
cache_pci_bus_lspci() {
hash lspci 2> /dev/null || return 1
cache_pci_init || return 0
@ -86,8 +86,8 @@ cache_pci_bus_lspci () {
done < <(lspci -Dnmm)
}
cache_pci_bus_pciconf () {
hash pciconf 2>/dev/null || return 1
cache_pci_bus_pciconf() {
hash pciconf 2> /dev/null || return 1
cache_pci_init || return 0
@ -95,25 +95,25 @@ cache_pci_bus_pciconf () {
local pci domain bus device function
while read -r pci class _ vd _; do
IFS=":" read -r domain bus device function _ <<<"${pci##*pci}"
IFS=":" read -r domain bus device function _ <<< "${pci##*pci}"
pci=$(printf '%04x:%02x:%02x:%x' \
"$domain" "$bus" "$device" "$function")
class=$(printf '0x%06x' $(( class )))
vendor=$(printf '0x%04x' $(( vd & 0xffff )))
device=$(printf '0x%04x' $(( (vd >> 16) & 0xffff )))
class=$(printf '0x%06x' $((class)))
vendor=$(printf '0x%04x' $((vd & 0xffff)))
device=$(printf '0x%04x' $(((vd >> 16) & 0xffff)))
cache_pci "$pci" "$class" "$vendor" "$device"
done < <(pciconf -l)
}
cache_pci_bus () {
cache_pci_bus() {
case "$(uname -s)" in
Linux) cache_pci_bus_lspci || cache_pci_bus_sysfs ;;
FreeBSD) cache_pci_bus_pciconf ;;
esac
}
iter_all_pci_sysfs () {
iter_all_pci_sysfs() {
cache_pci_bus_sysfs || return 1
# default to class of the nvme devices
@ -121,9 +121,9 @@ iter_all_pci_sysfs () {
local pci pcis
[[ -n ${pci_bus_cache["$find"]} ]] || return 0
read -ra pcis <<<"${pci_bus_cache["$find"]}"
read -ra pcis <<< "${pci_bus_cache["$find"]}"
if (( findx )); then
if ((findx)); then
printf '%s\n' "${pcis[@]::findx}"
else
printf '%s\n' "${pcis[@]}"
@ -139,22 +139,22 @@ function iter_all_pci_class_code() {
subclass="$(printf %02x $((0x$2)))"
progif="$(printf %02x $((0x$3)))"
if hash lspci &>/dev/null; then
if hash lspci &> /dev/null; then
if [ "$progif" != "00" ]; then
lspci -mm -n -D | \
grep -i -- "-p${progif}" | \
awk -v cc="\"${class}${subclass}\"" -F " " \
lspci -mm -n -D \
| grep -i -- "-p${progif}" \
| awk -v cc="\"${class}${subclass}\"" -F " " \
'{if (cc ~ $2) print $1}' | tr -d '"'
else
lspci -mm -n -D | \
awk -v cc="\"${class}${subclass}\"" -F " " \
lspci -mm -n -D \
| awk -v cc="\"${class}${subclass}\"" -F " " \
'{if (cc ~ $2) print $1}' | tr -d '"'
fi
elif hash pciconf &>/dev/null; then
local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" | \
cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
elif hash pciconf &> /dev/null; then
local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" \
| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
elif iter_all_pci_sysfs "$(printf '0x%06x' $(( 0x$progif | 0x$subclass << 8 | 0x$class << 16 )))"; then
elif iter_all_pci_sysfs "$(printf '0x%06x' $((0x$progif | 0x$subclass << 8 | 0x$class << 16)))"; then
:
else
echo "Missing PCI enumeration utility" >&2
@ -169,12 +169,12 @@ function iter_all_pci_dev_id() {
ven_id="$(printf %04x $((0x$1)))"
dev_id="$(printf %04x $((0x$2)))"
if hash lspci &>/dev/null; then
if hash lspci &> /dev/null; then
lspci -mm -n -D | awk -v ven="\"$ven_id\"" -v dev="\"${dev_id}\"" -F " " \
'{if (ven ~ $3 && dev ~ $4) print $1}' | tr -d '"'
elif hash pciconf &>/dev/null; then
local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" | \
cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
elif hash pciconf &> /dev/null; then
local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" \
| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
elif iter_all_pci_sysfs "0x$ven_id:0x$dev_id"; then
:

@ -2,13 +2,11 @@
set -e
function err()
{
function err() {
echo "$@" >&2
}
function usage()
{
function usage() {
err "Detect compiler and linker versions, generate mk/cc.mk"
err ""
err "Usage: ./detect_cc.sh [OPTION]..."
@ -24,11 +22,9 @@ function usage()
err " --cross-prefix=prefix Use the given prefix for the cross compiler toolchain"
}
for i in "$@"; do
case "$i" in
-h|--help)
-h | --help)
usage
exit 0
;;
@ -64,6 +60,7 @@ for i in "$@"; do
err "Unrecognized option $i"
usage
exit 1
;;
esac
done
@ -105,6 +102,7 @@ case "$LD_TYPE" in
*)
err "Unsupported linker: $LD"
exit 1
;;
esac
CCAR="ar"
@ -128,7 +126,7 @@ if [ -n "$CROSS_PREFIX" ]; then
# Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CC.
CC=$CROSS_PREFIX-$CC
if hash $CC 2>/dev/null; then
if hash $CC 2> /dev/null; then
expected_prefix=$($CC -dumpmachine)
if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then
@ -151,7 +149,7 @@ if [ -n "$CROSS_PREFIX" ]; then
# Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CXX.
CXX=$CROSS_PREFIX-$CXX
if hash $CXX 2>/dev/null; then
if hash $CXX 2> /dev/null; then
expected_prefix=$($CXX -dumpmachine)
if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then

@ -4,7 +4,7 @@ set -e
rootdir=$(readlink -f $(dirname $0))/..
function usage {
function usage() {
echo "Usage: [-j] $0 -n BDEV_NAME -d BASE_BDEV [-u UUID] [-c CACHE]"
echo "UUID is required when restoring device state"
echo
@ -14,8 +14,7 @@ function usage {
echo "CACHE - name of the bdev to be used as write buffer cache"
}
function create_json_config()
{
function create_json_config() {
echo "{"
echo '"subsystem": "bdev",'
echo '"config": ['
@ -44,10 +43,14 @@ while getopts ":c:d:hn:u:" arg; do
d) base_bdev=$OPTARG ;;
u) uuid=$OPTARG ;;
c) cache=$OPTARG ;;
h) usage
exit 0 ;;
*) usage
exit 1 ;;
h)
usage
exit 0
;;
*)
usage
exit 1
;;
esac
done

@ -5,22 +5,18 @@ set -e
rootdir=$(readlink -f $(dirname $0))/..
source "$rootdir/scripts/common.sh"
function create_classic_config()
{
function create_classic_config() {
echo "[Nvme]"
for (( i=0; i < ${#bdfs[@]}; i++))
do
for ((i = 0; i < ${#bdfs[@]}; i++)); do
echo " TransportID \"trtype:PCIe traddr:${bdfs[i]}\" Nvme$i"
done
}
function create_json_config()
{
function create_json_config() {
echo "{"
echo '"subsystem": "bdev",'
echo '"config": ['
for (( i=0; i < ${#bdfs[@]}; i++))
do
for ((i = 0; i < ${#bdfs[@]}; i++)); do
echo '{'
echo '"params": {'
echo '"trtype": "PCIe",'
@ -28,7 +24,7 @@ function create_json_config()
echo "\"traddr\": \"${bdfs[i]}\""
echo '},'
echo '"method": "bdev_nvme_attach_controller"'
if [ -z ${bdfs[i+1]} ]; then
if [ -z ${bdfs[i + 1]} ]; then
echo '}'
else
echo '},'

@ -3,8 +3,7 @@
set -e
function usage()
{
function usage() {
echo ""
echo "This script is intended to automate the installation of package dependencies to build SPDK."
echo "Please run this script as root user or with sudo -E."
@ -21,8 +20,7 @@ function usage()
exit 0
}
function install_all_dependencies ()
{
function install_all_dependencies() {
INSTALL_DEV_TOOLS=true
INSTALL_PMEM=true
INSTALL_FUSE=true
@ -41,26 +39,30 @@ while getopts 'abdfhipr-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage;;
all) install_all_dependencies;;
developer-tools) INSTALL_DEV_TOOLS=true;;
pmem) INSTALL_PMEM=true;;
fuse) INSTALL_FUSE=true;;
rdma) INSTALL_RDMA=true;;
docs) INSTALL_DOCS=true;;
*) echo "Invalid argument '$OPTARG'"
usage;;
help) usage ;;
all) install_all_dependencies ;;
developer-tools) INSTALL_DEV_TOOLS=true ;;
pmem) INSTALL_PMEM=true ;;
fuse) INSTALL_FUSE=true ;;
rdma) INSTALL_RDMA=true ;;
docs) INSTALL_DOCS=true ;;
*)
echo "Invalid argument '$OPTARG'"
usage
;;
esac
;;
h) usage;;
a) install_all_dependencies;;
d) INSTALL_DEV_TOOLS=true;;
p) INSTALL_PMEM=true;;
f) INSTALL_FUSE=true;;
r) INSTALL_RDMA=true;;
b) INSTALL_DOCS=true;;
*) echo "Invalid argument '$OPTARG'"
usage;;
h) usage ;;
a) install_all_dependencies ;;
d) INSTALL_DEV_TOOLS=true ;;
p) INSTALL_PMEM=true ;;
f) INSTALL_FUSE=true ;;
r) INSTALL_RDMA=true ;;
b) INSTALL_DOCS=true ;;
*)
echo "Invalid argument '$OPTARG'"
usage
;;
esac
done
@ -174,7 +176,7 @@ elif [ -f /etc/debian_version ]; then
fi
if [[ $INSTALL_FUSE == "true" ]]; then
# Additional dependencies for FUSE and NVMe-CUSE
if [[ $NAME == "Ubuntu" ]] && (( VERSION_ID_NUM > 1400 && VERSION_ID_NUM < 1900 )); then
if [[ $NAME == "Ubuntu" ]] && ((VERSION_ID_NUM > 1400 && VERSION_ID_NUM < 1900)); then
echo "Ubuntu $VERSION_ID does not have libfuse3-dev in mainline repository."
echo "You can install it manually"
else
@ -220,7 +222,7 @@ elif [ -f /etc/SuSE-release ] || [ -f /etc/SUSE-brand ]; then
# Additional dependencies for building docs
zypper install -y doxygen mscgen graphviz
fi
elif [ $(uname -s) = "FreeBSD" ] ; then
elif [ $(uname -s) = "FreeBSD" ]; then
# Minimal install
pkg install -y gmake cunit openssl git bash misc/e2fsprogs-libuuid python \
ncurses

@ -20,7 +20,7 @@ function configure_performance() {
echo -n "Moving all interrupts off of core 0..."
count=$(($(nproc) / 4))
cpumask="e"
for ((i=1; i<count; i++)); do
for ((i = 1; i < count; i++)); do
if [ $((i % 8)) -eq 0 ]; then
cpumask=",$cpumask"
fi

@ -12,7 +12,7 @@ bad_driver=true
driver_to_bind=uio_pci_generic
num_vfs=16
qat_pci_bdfs=( $(lspci -Dd:37c8 | awk '{print $1}') )
qat_pci_bdfs=($(lspci -Dd:37c8 | awk '{print $1}'))
if [ ${#qat_pci_bdfs[@]} -eq 0 ]; then
echo "No QAT devices found. Exiting"
exit 0
@ -48,8 +48,8 @@ done
# Confirm we have all of the virtual functions we asked for.
qat_vf_bdfs=( $(lspci -Dd:37c9 | awk '{print $1}') )
if (( ${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]}*num_vfs )); then
qat_vf_bdfs=($(lspci -Dd:37c9 | awk '{print $1}'))
if ((${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]} * num_vfs)); then
echo "Failed to prepare the VFs. Aborting"
exit 1
fi

@ -5,15 +5,17 @@ set -e
rootdir=$(readlink -f $(dirname $0))/..
source "$rootdir/scripts/common.sh"
function usage()
{
function usage() {
if [ $(uname) = Linux ]; then
options="[config|reset|status|cleanup|help]"
else
options="[config|reset|help]"
fi
[[ -n $2 ]] && ( echo "$2"; echo ""; )
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Helper script for allocating hugepages and binding NVMe, I/OAT, VMD and Virtio devices"
echo "to a generic VFIO kernel driver. If VFIO is not available on the system, this script"
echo "will fall back to UIO. NVMe and Virtio devices with active mountpoints will be ignored."
@ -63,15 +65,15 @@ function usage()
# back that with a /sys/modules. We also check
# /sys/bus/pci/drivers/ as neither lsmod nor /sys/modules might
# contain needed info (like in Fedora-like OS).
function check_for_driver {
function check_for_driver() {
if lsmod | grep -q ${1//-/_}; then
return 1
fi
if [[ -d /sys/module/${1} || \
-d /sys/module/${1//-/_} || \
-d /sys/bus/pci/drivers/${1} || \
-d /sys/bus/pci/drivers/${1//-/_} ]]; then
if [[ -d /sys/module/${1} || -d \
/sys/module/${1//-/_} || -d \
/sys/bus/pci/drivers/${1} || -d \
/sys/bus/pci/drivers/${1//-/_} ]]; then
return 2
fi
return 0
@ -137,7 +139,7 @@ function linux_hugetlbfs_mounts() {
mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
}
function get_nvme_name_from_bdf {
function get_nvme_name_from_bdf() {
local blknames=()
set +e
@ -157,7 +159,7 @@ function get_nvme_name_from_bdf {
printf '%s\n' "${blknames[@]}"
}
function get_virtio_names_from_bdf {
function get_virtio_names_from_bdf() {
blk_devs=$(lsblk --nodeps --output NAME)
virtio_names=()
@ -170,7 +172,7 @@ function get_virtio_names_from_bdf {
eval "$2=( " "${virtio_names[@]}" " )"
}
function configure_linux_pci {
function configure_linux_pci() {
local driver_path=""
driver_name=""
if [[ -n "${DRIVER_OVERRIDE}" ]]; then
@ -186,11 +188,11 @@ function configure_linux_pci {
if [[ "$driver_name" = "igb_uio" ]]; then
modprobe uio
fi
elif [[ -n "$(ls /sys/kernel/iommu_groups)" || \
(-e /sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
elif [[ -n "$(ls /sys/kernel/iommu_groups)" || (-e \
/sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
"$(cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode)" == "Y") ]]; then
driver_name=vfio-pci
elif modinfo uio_pci_generic >/dev/null 2>&1; then
elif modinfo uio_pci_generic > /dev/null 2>&1; then
driver_name=uio_pci_generic
elif [[ -r "$rootdir/dpdk/build/kmod/igb_uio.ko" ]]; then
driver_path="$rootdir/dpdk/build/kmod/igb_uio.ko"
@ -242,8 +244,7 @@ function configure_linux_pci {
grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
| awk -F"x" '{print $2}' > $TMP
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
@ -261,8 +262,7 @@ function configure_linux_pci {
grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
| awk -F"x" '{print $2}' > $TMP
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
@ -280,8 +280,7 @@ function configure_linux_pci {
grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
| awk -F"x" '{print $2}' > $TMP
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at $bdf"
@ -307,8 +306,7 @@ function configure_linux_pci {
grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
| awk -F"x" '{print $2}' > $TMP
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if [[ -z "$PCI_WHITELIST" ]] || ! pci_can_use $bdf; then
echo "Skipping un-whitelisted VMD device at $bdf"
@ -324,7 +322,7 @@ function configure_linux_pci {
echo "1" > "/sys/bus/pci/rescan"
}
function cleanup_linux {
function cleanup_linux() {
shopt -s extglob nullglob
dirs_to_clean=""
dirs_to_clean="$(echo {/var/run,/tmp}/dpdk/spdk{,_pid}+([0-9])) "
@ -338,12 +336,12 @@ function cleanup_linux {
done
shopt -u extglob nullglob
files_to_clean+="$(ls -1 /dev/shm/* | \
grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
files_to_clean+="$(ls -1 /dev/shm/* \
| grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
files_to_clean="$(readlink -e assert_not_empty $files_to_clean || true)"
if [[ -z "$files_to_clean" ]]; then
echo "Clean"
return 0;
return 0
fi
shopt -s extglob
@ -380,7 +378,7 @@ function cleanup_linux {
unset dirs_to_clean files_to_clean opened_files
}
function configure_linux {
function configure_linux() {
configure_linux_pci
hugetlbfs_mounts=$(linux_hugetlbfs_mounts)
@ -415,8 +413,8 @@ function configure_linux {
fi
MEMLOCK_AMNT=$(ulimit -l)
if [ "$MEMLOCK_AMNT" != "unlimited" ] ; then
MEMLOCK_MB=$(( MEMLOCK_AMNT / 1024 ))
if [ "$MEMLOCK_AMNT" != "unlimited" ]; then
MEMLOCK_MB=$((MEMLOCK_AMNT / 1024))
echo ""
echo "Current user memlock limit: ${MEMLOCK_MB} MB"
echo ""
@ -425,7 +423,7 @@ function configure_linux {
echo -n "To change this, please adjust limits.conf memlock "
echo "limit for current user."
if [ $MEMLOCK_AMNT -lt 65536 ] ; then
if [ $MEMLOCK_AMNT -lt 65536 ]; then
echo ""
echo "## WARNING: memlock limit is less than 64MB"
echo -n "## DPDK with VFIO may not be able to initialize "
@ -442,7 +440,7 @@ function configure_linux {
fi
}
function reset_linux_pci {
function reset_linux_pci() {
# NVMe
set +e
check_for_driver nvme
@ -470,8 +468,7 @@ function reset_linux_pci {
check_for_driver ioatdma
driver_loaded=$?
set -e
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
@ -495,8 +492,7 @@ function reset_linux_pci {
check_for_driver idxd
driver_loaded=$?
set -e
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
@ -522,8 +518,7 @@ function reset_linux_pci {
# virtio-pci but just virtio_scsi instead. Also need to make sure we get the
# underscore vs. dash right in the virtio_scsi name.
modprobe virtio-pci || true
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at"
@ -544,8 +539,7 @@ function reset_linux_pci {
check_for_driver vmd
driver_loaded=$?
set -e
while IFS= read -r dev_id
do
while IFS= read -r dev_id; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
if ! pci_can_use $bdf; then
echo "Skipping un-whitelisted VMD device at $bdf"
@ -563,7 +557,7 @@ function reset_linux_pci {
echo "1" > "/sys/bus/pci/rescan"
}
function reset_linux {
function reset_linux() {
reset_linux_pci
for mount in $(linux_hugetlbfs_mounts); do
rm -f "$mount"/spdk*map_*
@ -571,7 +565,7 @@ function reset_linux {
rm -f /run/.spdk*
}
function status_linux {
function status_linux() {
echo "Hugepages"
printf "%-6s %10s %8s / %6s\n" "node" "hugesize" "free" "total"
@ -606,7 +600,7 @@ function status_linux {
echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
for bdf in ${pci_bus_cache["0x010802"]}; do
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
if [ "$numa_nodes" = "0" ]; then
node="-"
else
@ -615,11 +609,11 @@ function status_linux {
device=$(cat /sys/bus/pci/devices/$bdf/device)
vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
if [ "$driver" = "nvme" ] && [ -d /sys/bus/pci/devices/$bdf/nvme ]; then
name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme);
name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme)
else
name="-";
name="-"
fi
echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name";
echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name"
done
echo ""
@ -631,7 +625,7 @@ function status_linux {
echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
for dev_id in $TMP; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
if [ "$numa_nodes" = "0" ]; then
node="-"
else
@ -652,7 +646,7 @@ function status_linux {
echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
for dev_id in $TMP; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
if [ "$numa_nodes" = "0" ]; then
node="-"
else
@ -673,7 +667,7 @@ function status_linux {
echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
for dev_id in $TMP; do
for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
if [ "$numa_nodes" = "0" ]; then
node="-"
else
@ -695,14 +689,14 @@ function status_linux {
echo -e "BDF\t\tNuma Node\tDriver Name"
for dev_id in $TMP; do
for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
node=$(cat /sys/bus/pci/devices/$bdf/numa_node);
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
echo -e "$bdf\t$node\t\t$driver"
done
done
}
function configure_freebsd_pci {
function configure_freebsd_pci() {
local devs ids id
local BDFS
@ -730,7 +724,7 @@ function configure_freebsd_pci {
kldload nic_uio.ko
}
function configure_freebsd {
function configure_freebsd() {
configure_freebsd_pci
# If contigmem is already loaded but the HUGEMEM specified doesn't match the
# previous value, unload contigmem so that we can reload with the new value.
@ -746,7 +740,7 @@ function configure_freebsd {
fi
}
function reset_freebsd {
function reset_freebsd() {
kldunload contigmem.ko || true
kldunload nic_uio.ko || true
}
@ -774,14 +768,14 @@ fi
if [ -z "$TARGET_USER" ]; then
TARGET_USER="$SUDO_USER"
if [ -z "$TARGET_USER" ]; then
TARGET_USER=$(logname 2>/dev/null) || true
TARGET_USER=$(logname 2> /dev/null) || true
fi
fi
if [ $(uname) = Linux ]; then
HUGEPGSZ=$(( $(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9') ))
HUGEPGSZ_MB=$(( HUGEPGSZ / 1024 ))
: ${NRHUGE=$(( (HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB ))}
HUGEPGSZ=$(($(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9')))
HUGEPGSZ_MB=$((HUGEPGSZ / 1024))
: ${NRHUGE=$(((HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB))}
if [ "$mode" == "config" ]; then
configure_linux

@ -11,8 +11,8 @@ set -e
VAGRANT_TARGET="$PWD"
DIR="$( cd "$( dirname $0 )" && pwd )"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )"
DIR="$(cd "$(dirname $0)" && pwd)"
SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
# The command line help
display_help() {
@ -145,7 +145,7 @@ while getopts ":b:n:s:x:p:u:vcraldHh-:" opt; do
esac
done
shift "$((OPTIND-1))" # Discard the options and sentinel --
shift "$((OPTIND - 1))" # Discard the options and sentinel --
SPDK_VAGRANT_DISTRO="$*"
@ -195,15 +195,15 @@ else
TMP=""
for args in $NVME_FILE; do
while IFS=, read -r path type namespace; do
TMP+="$path,";
TMP+="$path,"
if [ -z "$type" ]; then
type="nvme"
fi
NVME_DISKS_TYPE+="$type,";
NVME_DISKS_TYPE+="$type,"
if [ -z "$namespace" ] && [ -n "$SPDK_QEMU_EMULATOR" ]; then
namespace="1"
fi
NVME_DISKS_NAMESPACES+="$namespace,";
NVME_DISKS_NAMESPACES+="$namespace,"
if [ ${NVME_AUTO_CREATE} = 1 ]; then
$SPDK_DIR/scripts/vagrant/create_nvme_img.sh -t $type -n $path
fi
@ -296,7 +296,7 @@ if [ ${DRY_RUN} != 1 ]; then
vagrant plugin install vagrant-proxyconf
fi
if echo "$SPDK_VAGRANT_DISTRO" | grep -q freebsd; then
cat >~/vagrant_pkg.conf <<EOF
cat > ~/vagrant_pkg.conf << EOF
pkg_env: {
http_proxy: ${http_proxy}
}
@ -309,8 +309,8 @@ EOF
vagrant ssh -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh'
vagrant halt
vagrant package --output spdk_${SPDK_VAGRANT_DISTRO}.box
vagrant box add spdk/${SPDK_VAGRANT_DISTRO} spdk_${SPDK_VAGRANT_DISTRO}.box &&
rm spdk_${SPDK_VAGRANT_DISTRO}.box
vagrant box add spdk/${SPDK_VAGRANT_DISTRO} spdk_${SPDK_VAGRANT_DISTRO}.box \
&& rm spdk_${SPDK_VAGRANT_DISTRO}.box
vagrant destroy
fi
echo ""

@ -8,8 +8,8 @@ set -e
VAGRANT_TARGET="$PWD"
DIR="$( cd "$( dirname $0 )" && pwd )"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )"
DIR="$(cd "$(dirname $0)" && pwd)"
SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
USE_SSH_DIR=""
MOVE_TO_DEFAULT_DIR=false
INSTALL_DEPS=false
@ -68,8 +68,7 @@ export SPDK_DIR
export SPDK_VAGRANT_HTTP_PROXY
export INSTALL_DEPS
shift "$((OPTIND-1))" # Discard the options and sentinel --
shift "$((OPTIND - 1))" # Discard the options and sentinel --
SPDK_VAGRANT_DISTRO="$*"

@ -73,18 +73,23 @@ V=1
OPTIND=1 # Reset in case getopts has been used previously in the shell.
while getopts "d:qhn" opt; do
case "$opt" in
d) SPDK_SOURCE_PATH=$($READLINK -f $OPTARG)
d)
SPDK_SOURCE_PATH=$($READLINK -f $OPTARG)
echo Using SPDK source at ${SPDK_SOURCE_PATH}
METHOD=1
;;
q) V=0
q)
V=0
;;
n) NOOP=1
n)
NOOP=1
;;
h) display_help >&2
h)
display_help >&2
exit 0
;;
*) echo "Invalid option"
*)
echo "Invalid option"
echo ""
display_help >&2
exit 1

@ -9,8 +9,8 @@ fi
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
echo "SPDK_DIR = $SPDK_DIR"
# Bug fix for vagrant rsync problem
@ -59,9 +59,9 @@ else
fi
# Figure out what system we are running on
if [ -f /etc/lsb-release ];then
if [ -f /etc/lsb-release ]; then
. /etc/lsb-release
elif [ -f /etc/redhat-release ];then
elif [ -f /etc/redhat-release ]; then
yum update -y
yum install -y redhat-lsb
DISTRIB_ID=$(lsb_release -si)

@ -30,7 +30,7 @@ function raid_unmap_data_verify() {
# confirm random data is written correctly in raid0 device
cmp -b -n $rw_len $tmp_file $nbd
for (( i=0; i<${#unmap_blk_offs[@]}; i++ )); do
for ((i = 0; i < ${#unmap_blk_offs[@]}; i++)); do
unmap_off=$((blksize * ${unmap_blk_offs[$i]}))
unmap_len=$((blksize * ${unmap_blk_nums[$i]}))

@ -8,7 +8,7 @@ source $testdir/nbd_common.sh
rpc_py="$rootdir/scripts/rpc.py"
conf_file="$testdir/bdev.json"
# Make sure the configuration is clean
:>"$conf_file"
: > "$conf_file"
function cleanup() {
rm -f "/tmp/aiofile"
@ -28,7 +28,7 @@ function start_spdk_tgt() {
}
function setup_bdev_conf() {
"$rpc_py" <<-RPC
"$rpc_py" <<- RPC
bdev_split_create Malloc1 2
bdev_split_create -s 4 Malloc2 8
bdev_malloc_create -b Malloc0 32 512
@ -69,7 +69,8 @@ function setup_gpt_conf() {
dev=/dev/${nvme_dev##*/}
if ! pt=$(parted "$dev" -ms print 2>&1); then
[[ $pt == *"$dev: unrecognised disk label"* ]] || continue
gpt_nvme=$dev; break
gpt_nvme=$dev
break
fi
done
if [[ -n $gpt_nvme ]]; then
@ -78,7 +79,7 @@ function setup_gpt_conf() {
# change the GUID to SPDK GUID value
# FIXME: Hardcode this in some common place, this value should not be changed much
IFS="()" read -r _ SPDK_GPT_GUID _ < <(grep SPDK_GPT_PART_TYPE_GUID module/bdev/gpt/gpt.h)
SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x}
SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x/}
sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
"$rootdir/scripts/setup.sh"
@ -101,7 +102,7 @@ function setup_gpt_conf() {
function setup_crypto_aesni_conf() {
# Malloc0 and Malloc1 use AESNI
"$rpc_py" <<-RPC
"$rpc_py" <<- RPC
bdev_malloc_create -b Malloc0 16 512
bdev_malloc_create -b Malloc1 16 512
bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456
@ -112,7 +113,7 @@ function setup_crypto_aesni_conf() {
function setup_crypto_qat_conf() {
# Malloc0 will use QAT AES_CBC
# Malloc1 will use QAT AES_XTS
"$rpc_py" <<-RPC
"$rpc_py" <<- RPC
bdev_malloc_create -b Malloc0 16 512
bdev_malloc_create -b Malloc1 16 512
bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456
@ -227,20 +228,19 @@ function get_io_result() {
iostat_result=$(awk '{print $6}' <<< $iostat_result)
fi
echo ${iostat_result/.*}
echo ${iostat_result/.*/}
}
function run_qos_test() {
local qos_limit=$1
local qos_result=0
qos_result=$(get_io_result $2 $3)
if [ $2 = BANDWIDTH ]; then
qos_limit=$((qos_limit*1024))
qos_limit=$((qos_limit * 1024))
fi
lower_limit=$((qos_limit*9/10))
upper_limit=$((qos_limit*11/10))
lower_limit=$((qos_limit * 9 / 10))
upper_limit=$((qos_limit * 11 / 10))
# QoS realization is related with bytes transfered. It currently has some variation.
if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
@ -261,7 +261,7 @@ function qos_function_test() {
io_result=$(get_io_result IOPS $QOS_DEV_1)
# Set the IOPS limit as one quarter of the measured performance without QoS
iops_limit=$(((io_result/4)/qos_lower_iops_limit*qos_lower_iops_limit))
iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
if [ $iops_limit -gt $qos_lower_iops_limit ]; then
# Run bdevperf with IOPS rate limit on bdev 1
@ -271,7 +271,7 @@ function qos_function_test() {
# Run bdevperf with bandwidth rate limit on bdev 2
# Set the bandwidth limit as 1/10 of the measure performance without QoS
bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
bw_limit=$((bw_limit/1024/10))
bw_limit=$((bw_limit / 1024 / 10))
if [ $bw_limit -lt $qos_lower_bw_limit ]; then
bw_limit=$qos_lower_bw_limit
fi
@ -325,34 +325,40 @@ fi
test_type=${1:-bdev}
start_spdk_tgt
case "$test_type" in
bdev )
setup_bdev_conf;;
nvme )
setup_nvme_conf;;
gpt )
setup_gpt_conf;;
crypto_aesni )
setup_crypto_aesni_conf;;
crypto_qat )
setup_crypto_qat_conf;;
pmem )
setup_pmem_conf;;
rbd )
setup_rbd_conf;;
* )
bdev)
setup_bdev_conf
;;
nvme)
setup_nvme_conf
;;
gpt)
setup_gpt_conf
;;
crypto_aesni)
setup_crypto_aesni_conf
;;
crypto_qat)
setup_crypto_qat_conf
;;
pmem)
setup_pmem_conf
;;
rbd)
setup_rbd_conf
;;
*)
echo "invalid test name"
exit 1
;;
esac
# Generate json config and use it throughout all the tests
cat <<-CONF >"$conf_file"
cat <<- CONF > "$conf_file"
{"subsystems":[
$("$rpc_py" save_subsystem_config -n bdev)
]}
CONF
bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
bdevs_name=$(echo $bdevs | jq -r '.name')
bdev_list=($bdevs_name)
@ -386,7 +392,7 @@ fi
# Temporarily disabled - infinite loop
# if [ $RUN_NIGHTLY -eq 1 ]; then
# run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
# run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
# fi
# Bdev and configuration cleanup below this line

@ -6,7 +6,7 @@ function nbd_start_disks() {
local nbd_list=($3)
local i
for (( i=0; i<${#nbd_list[@]}; i++ )); do
for ((i = 0; i < ${#nbd_list[@]}; i++)); do
$rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]} ${nbd_list[$i]}
# Wait for nbd device ready
waitfornbd $(basename ${nbd_list[$i]})
@ -19,7 +19,7 @@ function nbd_start_disks_without_nbd_idx() {
local i
local nbd_device
for (( i=0; i<${#bdev_list[@]}; i++ )); do
for ((i = 0; i < ${#bdev_list[@]}; i++)); do
nbd_device=$($rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]})
# Wait for nbd device ready
waitfornbd $(basename ${nbd_device})
@ -29,7 +29,7 @@ function nbd_start_disks_without_nbd_idx() {
function waitfornbd_exit() {
local nbd_name=$1
for ((i=1; i<=20; i++)); do
for ((i = 1; i <= 20; i++)); do
if grep -q -w $nbd_name /proc/partitions; then
sleep 0.1
else

@ -1,7 +1,7 @@
#!/usr/bin/env bash
SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then
if [ $SYSTEM = "FreeBSD" ]; then
echo "blobfs.sh cannot run on FreeBSD currently."
exit 0
fi
@ -28,7 +28,7 @@ function cleanup() {
rm -f $conf_file
}
function blobfs_start_app {
function blobfs_start_app() {
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -c ${conf_file} &
blobfs_pid=$!

@ -10,7 +10,7 @@ dump_db_bench_on_err() {
# Dump entire *.txt to stderr to clearly see what might have failed
xtrace_disable
mapfile -t step_map <"$db_bench"
mapfile -t step_map < "$db_bench"
printf '%s\n' "${step_map[@]/#/* $step (FAILED)}" >&2
xtrace_restore
}
@ -60,7 +60,7 @@ fi
EXTRA_CXXFLAGS=""
GCC_VERSION=$(cc -dumpversion | cut -d. -f1)
if (( GCC_VERSION >= 9 )); then
if ((GCC_VERSION >= 9)); then
EXTRA_CXXFLAGS+="-Wno-deprecated-copy -Wno-pessimizing-move -Wno-error=stringop-truncation"
fi

@ -1,7 +1,7 @@
#!/usr/bin/env bash
SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then
if [ $SYSTEM = "FreeBSD" ]; then
echo "blob_io_wait.sh cannot run on FreeBSD currently."
exit 0
fi

@ -1,7 +1,7 @@
#!/usr/bin/env bash
SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then
if [ $SYSTEM = "FreeBSD" ]; then
echo "blobstore.sh cannot run on FreeBSD currently."
exit 0
fi
@ -16,8 +16,8 @@ $rootdir/scripts/gen_nvme.sh > $testdir/blobcli.conf
# generate random data file for import/export diff
dd if=/dev/urandom of=$testdir/test.pattern bs=1M count=1
(cd $testdir &&
$rootdir/examples/blob/cli/blobcli -c $testdir/blobcli.conf -b Nvme0n1 -T $testdir/test.bs > $testdir/btest.out)
(cd $testdir \
&& $rootdir/examples/blob/cli/blobcli -c $testdir/blobcli.conf -b Nvme0n1 -T $testdir/test.bs > $testdir/btest.out)
# the test script will import the test pattern generated by dd and then export
# it to a file so we can compare and confirm basic read and write

@ -12,8 +12,8 @@ VHOST_APP=("$_app_dir/vhost/vhost")
# Check if apps should execute under debug flags
if [[ -e $_root/include/spdk/config.h ]]; then
if [[ $(<"$_root/include/spdk/config.h") == *"#define SPDK_CONFIG_DEBUG"* ]] \
&& (( SPDK_AUTOTEST_DEBUG_APPS )); then
if [[ $(< "$_root/include/spdk/config.h") == *"#define SPDK_CONFIG_DEBUG"* ]] \
&& ((SPDK_AUTOTEST_DEBUG_APPS)); then
VHOST_FUZZ_APP+=("--log-flags=all")
ISCSI_APP+=("--log-flags=all")
NVMF_APP+=("--log-flags=all")

@ -22,7 +22,7 @@ source "$rootdir/test/common/applications.sh"
if [[ -e $rootdir/test/common/build_config.sh ]]; then
source "$rootdir/test/common/build_config.sh"
elif [[ -e $rootdir/mk/config.mk ]]; then
build_config=$(<"$rootdir/mk/config.mk")
build_config=$(< "$rootdir/mk/config.mk")
source <(echo "${build_config//\?=/=}")
else
source "$rootdir/CONFIG"
@ -39,8 +39,7 @@ function xtrace_enable() {
# Keep it as alias to avoid xtrace_enable backtrace always pointing to xtrace_restore.
# xtrace_enable will appear as called directly from the user script, from the same line
# that "called" xtrace_restore.
alias xtrace_restore=\
'if [ -z $XTRACE_NESTING_LEVEL ]; then
alias xtrace_restore='if [ -z $XTRACE_NESTING_LEVEL ]; then
if [[ "$PREV_BASH_OPTS" == *"x"* ]]; then
XTRACE_DISABLED="no"; PREV_BASH_OPTS=""; set -x; xtrace_enable;
fi
@ -55,42 +54,78 @@ fi'
export RUN_NIGHTLY
# Set defaults for missing test config options
: ${SPDK_AUTOTEST_DEBUG_APPS:=0}; export SPDK_AUTOTEST_DEBUG_APPS
: ${SPDK_RUN_VALGRIND=0}; export SPDK_RUN_VALGRIND
: ${SPDK_RUN_FUNCTIONAL_TEST=0}; export SPDK_RUN_FUNCTIONAL_TEST
: ${SPDK_TEST_UNITTEST=0}; export SPDK_TEST_UNITTEST
: ${SPDK_TEST_AUTOBUILD=0}; export SPDK_TEST_AUTOBUILD
: ${SPDK_TEST_ISAL=0}; export SPDK_TEST_ISAL
: ${SPDK_TEST_ISCSI=0}; export SPDK_TEST_ISCSI
: ${SPDK_TEST_ISCSI_INITIATOR=0}; export SPDK_TEST_ISCSI_INITIATOR
: ${SPDK_TEST_NVME=0}; export SPDK_TEST_NVME
: ${SPDK_TEST_NVME_CLI=0}; export SPDK_TEST_NVME_CLI
: ${SPDK_TEST_NVME_CUSE=0}; export SPDK_TEST_NVME_CUSE
: ${SPDK_TEST_NVMF=0}; export SPDK_TEST_NVMF
: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}; export SPDK_TEST_NVMF_TRANSPORT
: ${SPDK_TEST_RBD=0}; export SPDK_TEST_RBD
: ${SPDK_TEST_VHOST=0}; export SPDK_TEST_VHOST
: ${SPDK_TEST_BLOCKDEV=0}; export SPDK_TEST_BLOCKDEV
: ${SPDK_TEST_IOAT=0}; export SPDK_TEST_IOAT
: ${SPDK_TEST_BLOBFS=0}; export SPDK_TEST_BLOBFS
: ${SPDK_TEST_VHOST_INIT=0}; export SPDK_TEST_VHOST_INIT
: ${SPDK_TEST_PMDK=0}; export SPDK_TEST_PMDK
: ${SPDK_TEST_LVOL=0}; export SPDK_TEST_LVOL
: ${SPDK_TEST_JSON=0}; export SPDK_TEST_JSON
: ${SPDK_TEST_REDUCE=0}; export SPDK_TEST_REDUCE
: ${SPDK_TEST_VPP=0}; export SPDK_TEST_VPP
: ${SPDK_RUN_ASAN=0}; export SPDK_RUN_ASAN
: ${SPDK_RUN_UBSAN=0}; export SPDK_RUN_UBSAN
: ${SPDK_RUN_INSTALLED_DPDK=0}; export SPDK_RUN_INSTALLED_DPDK
: ${SPDK_RUN_NON_ROOT=0}; export SPDK_RUN_NON_ROOT
: ${SPDK_TEST_CRYPTO=0}; export SPDK_TEST_CRYPTO
: ${SPDK_TEST_FTL=0}; export SPDK_TEST_FTL
: ${SPDK_TEST_OCF=0}; export SPDK_TEST_OCF
: ${SPDK_TEST_FTL_EXTENDED=0}; export SPDK_TEST_FTL_EXTENDED
: ${SPDK_TEST_VMD=0}; export SPDK_TEST_VMD
: ${SPDK_TEST_OPAL=0}; export SPDK_TEST_OPAL
: ${SPDK_AUTOTEST_X=true}; export SPDK_AUTOTEST_X
: ${SPDK_TEST_RAID5=0}; export SPDK_TEST_RAID5
: ${SPDK_AUTOTEST_DEBUG_APPS:=0}
export SPDK_AUTOTEST_DEBUG_APPS
: ${SPDK_RUN_VALGRIND=0}
export SPDK_RUN_VALGRIND
: ${SPDK_RUN_FUNCTIONAL_TEST=0}
export SPDK_RUN_FUNCTIONAL_TEST
: ${SPDK_TEST_UNITTEST=0}
export SPDK_TEST_UNITTEST
: ${SPDK_TEST_AUTOBUILD=0}
export SPDK_TEST_AUTOBUILD
: ${SPDK_TEST_ISAL=0}
export SPDK_TEST_ISAL
: ${SPDK_TEST_ISCSI=0}
export SPDK_TEST_ISCSI
: ${SPDK_TEST_ISCSI_INITIATOR=0}
export SPDK_TEST_ISCSI_INITIATOR
: ${SPDK_TEST_NVME=0}
export SPDK_TEST_NVME
: ${SPDK_TEST_NVME_CLI=0}
export SPDK_TEST_NVME_CLI
: ${SPDK_TEST_NVME_CUSE=0}
export SPDK_TEST_NVME_CUSE
: ${SPDK_TEST_NVMF=0}
export SPDK_TEST_NVMF
: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}
export SPDK_TEST_NVMF_TRANSPORT
: ${SPDK_TEST_RBD=0}
export SPDK_TEST_RBD
: ${SPDK_TEST_VHOST=0}
export SPDK_TEST_VHOST
: ${SPDK_TEST_BLOCKDEV=0}
export SPDK_TEST_BLOCKDEV
: ${SPDK_TEST_IOAT=0}
export SPDK_TEST_IOAT
: ${SPDK_TEST_BLOBFS=0}
export SPDK_TEST_BLOBFS
: ${SPDK_TEST_VHOST_INIT=0}
export SPDK_TEST_VHOST_INIT
: ${SPDK_TEST_PMDK=0}
export SPDK_TEST_PMDK
: ${SPDK_TEST_LVOL=0}
export SPDK_TEST_LVOL
: ${SPDK_TEST_JSON=0}
export SPDK_TEST_JSON
: ${SPDK_TEST_REDUCE=0}
export SPDK_TEST_REDUCE
: ${SPDK_TEST_VPP=0}
export SPDK_TEST_VPP
: ${SPDK_RUN_ASAN=0}
export SPDK_RUN_ASAN
: ${SPDK_RUN_UBSAN=0}
export SPDK_RUN_UBSAN
: ${SPDK_RUN_INSTALLED_DPDK=0}
export SPDK_RUN_INSTALLED_DPDK
: ${SPDK_RUN_NON_ROOT=0}
export SPDK_RUN_NON_ROOT
: ${SPDK_TEST_CRYPTO=0}
export SPDK_TEST_CRYPTO
: ${SPDK_TEST_FTL=0}
export SPDK_TEST_FTL
: ${SPDK_TEST_OCF=0}
export SPDK_TEST_OCF
: ${SPDK_TEST_FTL_EXTENDED=0}
export SPDK_TEST_FTL_EXTENDED
: ${SPDK_TEST_VMD=0}
export SPDK_TEST_VMD
: ${SPDK_TEST_OPAL=0}
export SPDK_TEST_OPAL
: ${SPDK_AUTOTEST_X=true}
export SPDK_AUTOTEST_X
: ${SPDK_TEST_RAID5=0}
export SPDK_TEST_RAID5
# Export PYTHONPATH with addition of RPC framework. New scripts can be created
# specific use cases for tests.
@ -194,7 +229,7 @@ for i in "$@"; do
done
# start rpc.py coprocess if it's not started yet
if [[ -z $RPC_PIPE_PID ]] || ! kill -0 "$RPC_PIPE_PID" &>/dev/null; then
if [[ -z $RPC_PIPE_PID ]] || ! kill -0 "$RPC_PIPE_PID" &> /dev/null; then
coproc RPC_PIPE { "$rootdir/scripts/rpc.py" --server; }
exec {RPC_PIPE_OUTPUT}<&${RPC_PIPE[0]} {RPC_PIPE_INPUT}>&${RPC_PIPE[1]}
# all descriptors will automatically close together with this bash
@ -211,8 +246,8 @@ function get_config_params() {
xtrace_disable
config_params='--enable-debug --enable-werror'
if echo -e "#include <libunwind.h>\nint main(int argc, char *argv[]) {return 0;}\n" | \
gcc -o /dev/null -lunwind -x c - 2>/dev/null; then
if echo -e "#include <libunwind.h>\nint main(int argc, char *argv[]) {return 0;}\n" \
| gcc -o /dev/null -lunwind -x c - 2> /dev/null; then
config_params+=' --enable-log-bt'
fi
@ -375,7 +410,7 @@ function rpc_cmd_simple_data_json() {
while read -r elem val; do
jq_out["$elem"]=$val
done < <(rpc_cmd "$@" | jq -jr "$jq")
(( ${#jq_out[@]} > 0 )) || return 1
((${#jq_out[@]} > 0)) || return 1
}
# invert error code of any command and also trigger ERR on 0 (unlike bash ! prefix)
@ -428,7 +463,7 @@ function timing_finish() {
--nametype 'Step:' \
--countname seconds \
$output_dir/timing.txt \
>$output_dir/timing.svg
> $output_dir/timing.svg
fi
}
@ -439,8 +474,8 @@ function create_test_list() {
# Follow up with search in test directory recursively.
completion+=$(grep -rshI --include="*.sh" --exclude="autotest_common.sh" -e "run_test " $rootdir/test)
printf "%s" "$completion" | grep -v "#" \
| sed 's/^.*run_test/run_test/' | awk '{print $2}' | \
sed 's/\"//g' | sort > $output_dir/all_tests.txt || true
| sed 's/^.*run_test/run_test/' | awk '{print $2}' \
| sed 's/\"//g' | sort > $output_dir/all_tests.txt || true
xtrace_restore
}
@ -464,15 +499,14 @@ function gdb_attach() {
function process_core() {
ret=0
while IFS= read -r -d '' core;
do
while IFS= read -r -d '' core; do
exe=$(eu-readelf -n "$core" | grep psargs | sed "s/.*psargs: \([^ \'\" ]*\).*/\1/")
if [[ ! -f "$exe" ]]; then
exe=$(eu-readelf -n "$core" | grep -oP -m1 "$exe.+")
fi
echo "exe for $core is $exe"
if [[ -n "$exe" ]]; then
if hash gdb &>/dev/null; then
if hash gdb &> /dev/null; then
gdb -batch -ex "thread apply all bt full" $exe $core
fi
cp $exe $output_dir
@ -521,7 +555,7 @@ function waitforlisten() {
xtrace_disable
local ret=0
local i
for (( i = 40; i != 0; i-- )); do
for ((i = 40; i != 0; i--)); do
# if the process is no longer running, then exit the script
# since it means the application crashed
if ! kill -s 0 $1; then
@ -530,7 +564,7 @@ function waitforlisten() {
break
fi
if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &>/dev/null; then
if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &> /dev/null; then
break
fi
@ -538,7 +572,7 @@ function waitforlisten() {
done
xtrace_restore
if (( i == 0 )); then
if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$rpc_addr'"
ret=1
fi
@ -549,7 +583,7 @@ function waitfornbd() {
local nbd_name=$1
local i
for ((i=1; i<=20; i++)); do
for ((i = 1; i <= 20; i++)); do
if grep -q -w $nbd_name /proc/partitions; then
break
else
@ -562,7 +596,7 @@ function waitfornbd() {
# here trying to read the first block of the nbd block device to a temp
# file. Note that dd returns success when reading an empty file, so we
# need to check the size of the output file instead.
for ((i=1; i<=20; i++)); do
for ((i = 1; i <= 20; i++)); do
dd if=/dev/$nbd_name of=/tmp/nbdtest bs=4096 count=1 iflag=direct
size=$(stat -c %s /tmp/nbdtest)
rm -f /tmp/nbdtest
@ -580,7 +614,7 @@ function waitforbdev() {
local bdev_name=$1
local i
for ((i=1; i<=20; i++)); do
for ((i = 1; i <= 20; i++)); do
if $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qw $bdev_name; then
return 0
fi
@ -611,7 +645,7 @@ function make_filesystem() {
if [ $i -ge 15 ]; then
return 1
fi
i=$((i+1))
i=$((i + 1))
sleep 1
done
@ -715,7 +749,7 @@ function _start_stub() {
# but ASLR can still be unreliable in some cases.
# We will reenable it again after multi-process testing is complete in kill_stub().
# Save current setting so it can be restored upon calling kill_stub().
_randomize_va_space=$(</proc/sys/kernel/randomize_va_space)
_randomize_va_space=$(< /proc/sys/kernel/randomize_va_space)
echo 0 > /proc/sys/kernel/randomize_va_space
$rootdir/test/app/stub/stub $1 &
stubpid=$!
@ -739,7 +773,7 @@ function kill_stub() {
if [[ -e /proc/$stubpid ]]; then
kill $1 $stubpid
wait $stubpid
fi 2>/dev/null || :
fi 2> /dev/null || :
rm -f /var/run/spdk_stub0
# Re-enable ASLR now that we are done with multi-process testing
# Note: "1" enables ASLR w/o randomizing data segments, "2" adds data segment
@ -811,12 +845,12 @@ function print_backtrace() {
local bt="" cmdline=()
if [[ -f $src ]]; then
bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" | \
sed "s/^/ /g" | sed "s/^ $line_nr /=> $line_nr /g")
bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" \
| sed "s/^/ /g" | sed "s/^ $line_nr /=> $line_nr /g")
fi
# If extdebug set the BASH_ARGC[i], try to fetch all the args
if (( BASH_ARGC[i] > 0 )); then
if ((BASH_ARGC[i] > 0)); then
# Use argc as index to reverse the stack
local argc=${BASH_ARGC[i]} arg
for arg in "${args[@]::BASH_ARGC[i]}"; do
@ -825,7 +859,10 @@ function print_backtrace() {
args=("${args[@]:BASH_ARGC[i]}")
fi
echo "in $src:$line_nr -> $func($(IFS=","; printf '%s\n' "${cmdline[*]:-[]}"))"
echo "in $src:$line_nr -> $func($(
IFS=","
printf '%s\n' "${cmdline[*]:-[]}"
))"
echo " ..."
echo "${bt:-backtrace unavailable}"
echo " ..."
@ -836,8 +873,7 @@ function print_backtrace() {
return 0
}
function discover_bdevs()
{
function discover_bdevs() {
local rootdir=$1
local config_file=$2
local cfg_type=$3
@ -856,11 +892,11 @@ function discover_bdevs()
# Start the bdev service to query for the list of available
# bdevs.
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 \
$cfg_type $config_file &>/dev/null &
$cfg_type $config_file &> /dev/null &
stubpid=$!
while ! [ -e /var/run/spdk_bdev0 ]; do
# If this counter drops to zero, errexit will be caught to abort the test
(( wait_for_spdk_bdev-- ))
((wait_for_spdk_bdev--))
sleep 1
done
@ -873,8 +909,7 @@ function discover_bdevs()
rm -f /var/run/spdk_bdev0
}
function waitforserial()
{
function waitforserial() {
local i=0
local nvme_device_counter=1
if [[ -n "$2" ]]; then
@ -883,7 +918,7 @@ function waitforserial()
while [ $(lsblk -l -o NAME,SERIAL | grep -c $1) -lt $nvme_device_counter ]; do
[ $i -lt 15 ] || break
i=$((i+1))
i=$((i + 1))
echo "Waiting for devices"
sleep 1
done
@ -895,12 +930,11 @@ function waitforserial()
return 0
}
function waitforserial_disconnect()
{
function waitforserial_disconnect() {
local i=0
while lsblk -o NAME,SERIAL | grep -q -w $1; do
[ $i -lt 15 ] || break
i=$((i+1))
i=$((i + 1))
echo "Waiting for disconnect devices"
sleep 1
done
@ -912,12 +946,11 @@ function waitforserial_disconnect()
return 0
}
function waitforblk()
{
function waitforblk() {
local i=0
while ! lsblk -l -o NAME | grep -q -w $1; do
[ $i -lt 15 ] || break
i=$((i+1))
i=$((i + 1))
sleep 1
done
@ -928,12 +961,11 @@ function waitforblk()
return 0
}
function waitforblk_disconnect()
{
function waitforblk_disconnect() {
local i=0
while lsblk -l -o NAME | grep -q -w $1; do
[ $i -lt 15 ] || break
i=$((i+1))
i=$((i + 1))
sleep 1
done
@ -944,12 +976,11 @@ function waitforblk_disconnect()
return 0
}
function waitforfile()
{
function waitforfile() {
local i=0
while [ ! -e $1 ]; do
[ $i -lt 200 ] || break
i=$((i+1))
i=$((i + 1))
sleep 0.1
done
@ -960,8 +991,7 @@ function waitforfile()
return 0
}
function fio_config_gen()
{
function fio_config_gen() {
local config_file=$1
local workload=$2
local bdev_type=$3
@ -1011,8 +1041,7 @@ EOL
fi
}
function fio_bdev()
{
function fio_bdev() {
# Setup fio binary cmd line
local fio_dir=$CONFIG_FIO_SOURCE_DIR
local bdev_plugin="$rootdir/examples/bdev/fio_plugin/fio_plugin"
@ -1024,8 +1053,7 @@ function fio_bdev()
LD_PRELOAD="$asan_lib $bdev_plugin" "$fio_dir"/fio "$@"
}
function fio_nvme()
{
function fio_nvme() {
# Setup fio binary cmd line
local fio_dir=$CONFIG_FIO_SOURCE_DIR
local nvme_plugin="$rootdir/examples/nvme/fio_plugin/fio_plugin"
@ -1036,8 +1064,7 @@ function fio_nvme()
LD_PRELOAD="$asan_lib $nvme_plugin" "$fio_dir"/fio "$@"
}
function get_lvs_free_mb()
{
function get_lvs_free_mb() {
local lvs_uuid=$1
local lvs_info
local fc
@ -1047,12 +1074,11 @@ function get_lvs_free_mb()
cs=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .cluster_size" <<< "$lvs_info")
# Change to MB's
free_mb=$((fc*cs/1024/1024))
free_mb=$((fc * cs / 1024 / 1024))
echo "$free_mb"
}
function get_bdev_size()
{
function get_bdev_size() {
local bdev_name=$1
local bdev_info
local bs
@ -1062,12 +1088,11 @@ function get_bdev_size()
nb=$(jq ".[] .num_blocks" <<< "$bdev_info")
# Change to MB's
bdev_size=$((bs*nb/1024/1024))
bdev_size=$((bs * nb / 1024 / 1024))
echo "$bdev_size"
}
function autotest_cleanup()
{
function autotest_cleanup() {
$rootdir/scripts/setup.sh reset
$rootdir/scripts/setup.sh cleanup
if [ $(uname -s) = "Linux" ]; then
@ -1080,8 +1105,7 @@ function autotest_cleanup()
rm -rf "$asan_suppression_file"
}
function freebsd_update_contigmem_mod()
{
function freebsd_update_contigmem_mod() {
if [ $(uname) = FreeBSD ]; then
kldunload contigmem.ko || true
if [ -n "$WITH_DPDK_DIR" ]; then
@ -1099,7 +1123,7 @@ function freebsd_update_contigmem_mod()
fi
}
function get_nvme_name_from_bdf {
function get_nvme_name_from_bdf() {
blkname=()
nvme_devs=$(lsblk -d --output NAME | grep "^nvme") || true
@ -1120,7 +1144,7 @@ function get_nvme_name_from_bdf {
printf '%s\n' "${blkname[@]}"
}
function opal_revert_cleanup {
function opal_revert_cleanup() {
$rootdir/app/spdk_tgt/spdk_tgt &
spdk_tgt_pid=$!
waitforlisten $spdk_tgt_pid

@ -37,8 +37,7 @@ else
PACKAGEMNG='undefined'
fi
function install_rxe_cfg()
{
function install_rxe_cfg() {
if echo $CONF | grep -q librxe; then
# rxe_cfg is used in the NVMe-oF tests
# The librxe-dev repository provides a command line tool called rxe_cfg which makes it
@ -60,8 +59,7 @@ function install_rxe_cfg()
fi
}
function install_iscsi_adm()
{
function install_iscsi_adm() {
if echo $CONF | grep -q iscsi; then
# iscsiadm is used in the iscsi_tgt tests
# The version of iscsiadm that ships with fedora 26 was broken as of November 3 2017.
@ -95,8 +93,7 @@ function install_iscsi_adm()
fi
}
function install_qat()
{
function install_qat() {
if [ "$PACKAGEMNG" = "dnf" ]; then
sudo dnf install -y libudev-devel
@ -136,8 +133,7 @@ function install_qat()
fi
}
function install_rocksdb()
{
function install_rocksdb() {
if echo $CONF | grep -q rocksdb; then
# Rocksdb is installed for use with the blobfs tests.
if [ ! -d /usr/src/rocksdb ]; then
@ -151,8 +147,7 @@ function install_rocksdb()
fi
}
function install_fio()
{
function install_fio() {
if echo $CONF | grep -q fio; then
# This version of fio is installed in /usr/src/fio to enable
# building the spdk fio plugin.
@ -166,15 +161,15 @@ function install_fio()
sudo mv fio /usr/src/
fi
(
git -C /usr/src/fio checkout master &&
git -C /usr/src/fio pull &&
git -C /usr/src/fio checkout $fio_version &&
if [ $OSID == 'freebsd' ]; then
gmake -C /usr/src/fio -j${jobs} &&
sudo gmake -C /usr/src/fio install
git -C /usr/src/fio checkout master \
&& git -C /usr/src/fio pull \
&& git -C /usr/src/fio checkout $fio_version \
&& if [ $OSID == 'freebsd' ]; then
gmake -C /usr/src/fio -j${jobs} \
&& sudo gmake -C /usr/src/fio install
else
make -C /usr/src/fio -j${jobs} &&
sudo make -C /usr/src/fio install
make -C /usr/src/fio -j${jobs} \
&& sudo make -C /usr/src/fio install
fi
)
else
@ -183,8 +178,7 @@ function install_fio()
fi
}
function install_flamegraph()
{
function install_flamegraph() {
if echo $CONF | grep -q flamegraph; then
# Flamegraph is used when printing out timing graphs for the tests.
if [ ! -d /usr/local/FlameGraph ]; then
@ -197,8 +191,7 @@ function install_flamegraph()
fi
}
function install_qemu()
{
function install_qemu() {
if echo $CONF | grep -q qemu; then
# Two versions of QEMU are used in the tests.
# Stock QEMU is used for vhost. A special fork
@ -249,8 +242,7 @@ function install_qemu()
fi
}
function install_vpp()
{
function install_vpp() {
if echo $CONF | grep -q vpp; then
if [ -d /usr/local/src/vpp ]; then
echo "vpp already cloned."
@ -282,8 +274,7 @@ function install_vpp()
fi
}
function install_nvmecli()
{
function install_nvmecli() {
if echo $CONF | grep -q nvmecli; then
SPDK_NVME_CLI_BRANCH=spdk-1.6
if [ ! -d nvme-cli ]; then
@ -300,9 +291,7 @@ function install_nvmecli()
fi
}
function install_libiscsi()
{
function install_libiscsi() {
if echo $CONF | grep -q libiscsi; then
# We currently don't make any changes to the libiscsi repository for our tests, but it is possible that we will need
# to later. Cloning from git is just future proofing the machines.
@ -311,7 +300,7 @@ function install_libiscsi()
else
echo "libiscsi already checked out. Skipping"
fi
( cd libiscsi && ./autogen.sh && ./configure --prefix=/usr/local/libiscsi)
(cd libiscsi && ./autogen.sh && ./configure --prefix=/usr/local/libiscsi)
make -C ./libiscsi -j${jobs}
sudo make -C ./libiscsi install
fi
@ -320,16 +309,15 @@ function install_libiscsi()
function install_git() {
sudo yum install -y zlib-devel curl-devel
tar -xzof <(wget -qO- "$GIT_REPO_GIT")
(cd git-${GIT_VERSION} && \
make configure && \
./configure --prefix=/usr/local/git && \
sudo make -j${jobs} install)
(cd git-${GIT_VERSION} \
&& make configure \
&& ./configure --prefix=/usr/local/git \
&& sudo make -j${jobs} install)
sudo sh -c "echo 'export PATH=/usr/local/git/bin:$PATH' >> /etc/bashrc"
exec $SHELL
}
function usage()
{
function usage() {
echo "This script is intended to automate the environment setup for a linux virtual machine."
echo "Please run this script as your regular user. The script will make calls to sudo as needed."
echo ""
@ -343,15 +331,15 @@ function usage()
}
# Get package manager #
if hash yum &>/dev/null; then
if hash yum &> /dev/null; then
PACKAGEMNG=yum
elif hash dnf &>/dev/null; then
elif hash dnf &> /dev/null; then
PACKAGEMNG=dnf
elif hash apt-get &>/dev/null; then
elif hash apt-get &> /dev/null; then
PACKAGEMNG=apt-get
elif hash pacman &>/dev/null; then
elif hash pacman &> /dev/null; then
PACKAGEMNG=pacman
elif hash pkg &>/dev/null; then
elif hash pkg &> /dev/null; then
PACKAGEMNG=pkg
else
echo 'Supported package manager not found. Script supports "dnf" and "apt-get".'
@ -369,22 +357,26 @@ while getopts 'iuht:c:-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage;;
upgrade) UPGRADE=true;;
install-deps) INSTALL=true;;
test-conf=*) CONF="${OPTARG#*=}";;
conf-path=*) CONF_PATH="${OPTARG#*=}";;
*) echo "Invalid argument '$OPTARG'"
usage;;
help) usage ;;
upgrade) UPGRADE=true ;;
install-deps) INSTALL=true ;;
test-conf=*) CONF="${OPTARG#*=}" ;;
conf-path=*) CONF_PATH="${OPTARG#*=}" ;;
*)
echo "Invalid argument '$OPTARG'"
usage
;;
esac
;;
h) usage;;
u) UPGRADE=true;;
i) INSTALL=true;;
t) CONF="$OPTARG";;
c) CONF_PATH="$OPTARG";;
*) echo "Invalid argument '$OPTARG'"
usage;;
h) usage ;;
u) UPGRADE=true ;;
i) INSTALL=true ;;
t) CONF="$OPTARG" ;;
c) CONF_PATH="$OPTARG" ;;
*)
echo "Invalid argument '$OPTARG'"
usage
;;
esac
done
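The option parsing above shows the case-branch and operator placement style that shfmt enforces. Below is a sketch of invoking shfmt to get this kind of layout; the exact flag combination is an assumption for illustration, not necessarily what scripts/check_format.sh passes.

# -i 0 indents with tabs, -bn lets && and || start continuation lines,
# -ci indents case branches, -sr puts a space after redirect operators.
shfmt -w -ln bash -i 0 -bn -ci -sr path/to/script.sh
# Use -d instead of -w to only print a diff without rewriting the file.
shfmt -d -ln bash -i 0 -bn -ci -sr path/to/script.sh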
@ -399,25 +391,39 @@ fi
cd ~
GIT_VERSION=2.25.1
: ${GIT_REPO_SPDK=https://github.com/spdk/spdk.git}; export GIT_REPO_SPDK
: ${GIT_REPO_DPDK=https://github.com/spdk/dpdk.git}; export GIT_REPO_DPDK
: ${GIT_REPO_LIBRXE=https://github.com/SoftRoCE/librxe-dev.git}; export GIT_REPO_LIBRXE
: ${GIT_REPO_OPEN_ISCSI=https://github.com/open-iscsi/open-iscsi}; export GIT_REPO_OPEN_ISCSI
: ${GIT_REPO_ROCKSDB=https://review.spdk.io/spdk/rocksdb}; export GIT_REPO_ROCKSDB
: ${GIT_REPO_FIO=http://git.kernel.dk/fio.git}; export GIT_REPO_FIO
: ${GIT_REPO_FLAMEGRAPH=https://github.com/brendangregg/FlameGraph.git}; export GIT_REPO_FLAMEGRAPH
: ${GIT_REPO_QEMU=https://github.com/spdk/qemu}; export GIT_REPO_QEMU
: ${GIT_REPO_VPP=https://gerrit.fd.io/r/vpp}; export GIT_REPO_VPP
: ${GIT_REPO_LIBISCSI=https://github.com/sahlberg/libiscsi}; export GIT_REPO_LIBISCSI
: ${GIT_REPO_SPDK_NVME_CLI=https://github.com/spdk/nvme-cli}; export GIT_REPO_SPDK_NVME_CLI
: ${GIT_REPO_INTEL_IPSEC_MB=https://github.com/spdk/intel-ipsec-mb.git}; export GIT_REPO_INTEL_IPSEC_MB
: ${DRIVER_LOCATION_QAT=https://01.org/sites/default/files/downloads//qat1.7.l.4.9.0-00008.tar.gz}; export DRIVER_LOCATION_QAT
: ${GIT_REPO_GIT=https://github.com/git/git/archive/v${GIT_VERSION}.tar.gz}; export GIT_REPO_GIT
: ${GIT_REPO_SPDK=https://github.com/spdk/spdk.git}
export GIT_REPO_SPDK
: ${GIT_REPO_DPDK=https://github.com/spdk/dpdk.git}
export GIT_REPO_DPDK
: ${GIT_REPO_LIBRXE=https://github.com/SoftRoCE/librxe-dev.git}
export GIT_REPO_LIBRXE
: ${GIT_REPO_OPEN_ISCSI=https://github.com/open-iscsi/open-iscsi}
export GIT_REPO_OPEN_ISCSI
: ${GIT_REPO_ROCKSDB=https://review.spdk.io/spdk/rocksdb}
export GIT_REPO_ROCKSDB
: ${GIT_REPO_FIO=http://git.kernel.dk/fio.git}
export GIT_REPO_FIO
: ${GIT_REPO_FLAMEGRAPH=https://github.com/brendangregg/FlameGraph.git}
export GIT_REPO_FLAMEGRAPH
: ${GIT_REPO_QEMU=https://github.com/spdk/qemu}
export GIT_REPO_QEMU
: ${GIT_REPO_VPP=https://gerrit.fd.io/r/vpp}
export GIT_REPO_VPP
: ${GIT_REPO_LIBISCSI=https://github.com/sahlberg/libiscsi}
export GIT_REPO_LIBISCSI
: ${GIT_REPO_SPDK_NVME_CLI=https://github.com/spdk/nvme-cli}
export GIT_REPO_SPDK_NVME_CLI
: ${GIT_REPO_INTEL_IPSEC_MB=https://github.com/spdk/intel-ipsec-mb.git}
export GIT_REPO_INTEL_IPSEC_MB
: ${DRIVER_LOCATION_QAT=https://01.org/sites/default/files/downloads//qat1.7.l.4.9.0-00008.tar.gz}
export DRIVER_LOCATION_QAT
: ${GIT_REPO_GIT=https://github.com/git/git/archive/v${GIT_VERSION}.tar.gz}
export GIT_REPO_GIT
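A quick sketch of the parameter-default idiom used in the block above, with a hypothetical variable name and URL; ':' is the shell no-op, so the expansion's only effect is the conditional assignment.

# Assigns EXAMPLE_REPO only if it is unset, so a caller's value wins.
: ${EXAMPLE_REPO=https://example.com/repo.git}
export EXAMPLE_REPO
echo "$EXAMPLE_REPO" # prints the default unless EXAMPLE_REPO was set by the caller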
if [ $PACKAGEMNG == 'pkg' ]; then
jobs=$(( $(sysctl -n hw.ncpu) * 2 ))
jobs=$(($(sysctl -n hw.ncpu) * 2))
else
jobs=$(($(nproc)*2))
jobs=$(($(nproc) * 2))
fi
if $UPGRADE; then
@ -474,8 +480,7 @@ if $INSTALL; then
libtool \
libmount-devel \
iscsi-initiator-utils \
isns-utils-devel\
pmempool \
isns-utils-devel pmempool \
perl-open \
glib2-devel \
pixman-devel \
@ -711,18 +716,18 @@ if [ $OSID != 'freebsd' ]; then
if [ $LIBRXE_INSTALL = true ]; then
#Ubuntu18 integrates librxe into rdma-core, so libibverbs-dev no longer ships infiniband/driver.h.
#Don't compile librxe on Ubuntu 18 or later versions; install the rdma-core package instead.
install_rxe_cfg&
install_rxe_cfg &
fi
install_iscsi_adm&
install_libiscsi&
install_vpp&
install_nvmecli&
install_qat&
install_rocksdb&
install_flamegraph&
install_qemu&
install_iscsi_adm &
install_libiscsi &
install_vpp &
install_nvmecli &
install_qat &
install_rocksdb &
install_flamegraph &
install_qemu &
fi
install_fio&
install_fio &
wait
# create autorun-spdk.conf in home folder. This is sourced by the autotest_common.sh file.


@ -63,11 +63,13 @@ function run_bdevperf() {
test_type=$1
case "$test_type" in
qat )
pmd=1;;
isal )
pmd=2;;
* )
qat)
pmd=1
;;
isal)
pmd=2
;;
*)
echo "invalid pmd name"
exit 1
;;


@ -10,7 +10,7 @@ device=$1
use_append=$2
rpc_py=$rootdir/scripts/rpc.py
for (( i=0; i<${#tests[@]}; i++ )) do
for ((i = 0; i < ${#tests[@]}; i++)); do
timing_enter "${tests[$i]}"
"$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) &
bdevperf_pid=$!


@ -1,24 +1,24 @@
# Common utility functions to be sourced by the libftl test scripts
function get_chunk_size() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" |
grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
| grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
}
function get_num_group() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" |
grep 'Groups' | sed 's/[^0-9]//g'
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
| grep 'Groups' | sed 's/[^0-9]//g'
}
function get_num_pu() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" |
grep 'PUs' | sed 's/[^0-9]//g'
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
| grep 'PUs' | sed 's/[^0-9]//g'
}
function has_separate_md() {
local md_type
md_type=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | \
grep 'Metadata Transferred' | cut -d: -f2)
md_type=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
| grep 'Metadata Transferred' | cut -d: -f2)
if [[ "$md_type" =~ Separate ]]; then
return 0
else
@ -48,7 +48,7 @@ function create_nv_cache_bdev() {
}
function gen_ftl_nvme_conf() {
jq . <<-JSON
jq . <<- JSON
{
"subsystems": [
{


@ -14,7 +14,7 @@ while getopts ':u:c:' opt; do
?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
esac
done
shift $((OPTIND -1))
shift $((OPTIND - 1))
device=$1
@ -37,7 +37,8 @@ pu_count=$((num_group * num_pu))
# Write one band worth of data + one extra chunk
data_size=$((chunk_size * (pu_count + 1)))
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid
if [ -n "$nv_cache" ]; then
@ -69,7 +70,8 @@ $rpc_py nbd_stop_disk /dev/nbd0
kill -9 $svcpid
rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
waitforlisten $svcpid
$rpc_py load_config < $testdir/config/ftl.json


@ -35,7 +35,8 @@ export FTL_JSON_CONF=$testdir/config/ftl.json
trap "fio_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
@ -50,7 +51,7 @@ fi
waitforbdev ftl0
(
echo '{"subsystems": [';
echo '{"subsystems": ['
$rpc_py save_subsystem_config -n bdev
echo ']}'
) > $FTL_JSON_CONF


@ -62,7 +62,8 @@ run_test "ftl_json" $testdir/json.sh $device
if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
run_test "ftl_fio_basic" $testdir/fio.sh $device basic
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT


@ -15,7 +15,8 @@ json_kill() {
trap "json_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid
# Create new bdev from json configuration


@ -16,7 +16,7 @@ while getopts ':u:c:' opt; do
?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
esac
done
shift $((OPTIND -1))
shift $((OPTIND - 1))
device=$1
num_group=$(get_num_group $device)
num_pu=$(get_num_pu $device)
@ -37,7 +37,8 @@ restore_kill() {
trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
# Wait until spdk_tgt starts
waitforlisten $svcpid
@ -73,7 +74,8 @@ md5sum $mount_dir/testfile > $testdir/testfile.md5
umount $mount_dir
killprocess $svcpid
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init & svcpid=$!
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
# Wait until spdk_tgt starts
waitforlisten $svcpid


@ -27,12 +27,12 @@ done
timing_enter autofuzz
if [ "$TEST_MODULE" == "nvmf" ]; then
allowed_transports=( "${allowed_nvme_transports[@]}" )
allowed_transports=("${allowed_nvme_transports[@]}")
if [ $TEST_TRANSPORT == "rdma" ]; then
config_params="$config_params --with-rdma"
fi
elif [ "$TEST_MODULE" == "vhost" ]; then
allowed_transports=( "${allowed_vhost_transports[@]}" )
allowed_transports=("${allowed_vhost_transports[@]}")
config_params="$config_params --with-vhost --with-virtio"
else
echo "Invalid module specified. Please specify either nvmf or vhost."


@ -33,12 +33,13 @@ for i in "$@"; do
case "$i" in
--timeout=*)
TEST_TIMEOUT="${i#*=}"
;;
esac
done
timing_enter start_iscsi_tgt
"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK &>$output_dir/iscsi_autofuzz_tgt_output.txt &
"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK &> $output_dir/iscsi_autofuzz_tgt_output.txt &
iscsipid=$!
trap 'killprocess $iscsipid; exit 1' SIGINT SIGTERM EXIT
@ -58,7 +59,7 @@ sleep 1
trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t $TEST_TIMEOUT 2>$output_dir/iscsi_autofuzz_logs.txt
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t $TEST_TIMEOUT 2> $output_dir/iscsi_autofuzz_logs.txt
$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'


@ -14,6 +14,7 @@ for i in "$@"; do
case "$i" in
--timeout=*)
TEST_TIMEOUT="${i#*=}"
;;
esac
done
@ -24,7 +25,7 @@ timing_enter nvmf_fuzz_test
echo "[Nvme]" > $testdir/nvmf_fuzz.conf
echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
"${NVMF_APP[@]}" -m 0xF &>"$output_dir/nvmf_autofuzz_tgt_output.txt" &
"${NVMF_APP[@]}" -m 0xF &> "$output_dir/nvmf_autofuzz_tgt_output.txt" &
nvmfpid=$!
trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
@ -39,7 +40,7 @@ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t $TEST_TIMEOUT -C $testdir/nvmf_fuzz.conf -N -a 2>$output_dir/nvmf_autofuzz_logs.txt
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t $TEST_TIMEOUT -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_autofuzz_logs.txt
rm -f $testdir/nvmf_fuzz.conf
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1


@ -19,6 +19,7 @@ for i in "$@"; do
case "$i" in
--timeout=*)
TEST_TIMEOUT="${i#*=}"
;;
esac
done
@ -29,13 +30,13 @@ timing_enter setup
$rootdir/scripts/setup.sh
timing_exit setup
"${VHOST_APP[@]}" &>"$output_dir/vhost_fuzz_tgt_output.txt" &
"${VHOST_APP[@]}" &> "$output_dir/vhost_fuzz_tgt_output.txt" &
vhostpid=$!
waitforlisten $vhostpid
trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
"${VHOST_FUZZ_APP[@]}" -t $TEST_TIMEOUT 2>"$output_dir/vhost_autofuzz_output1.txt" &
"${VHOST_FUZZ_APP[@]}" -t $TEST_TIMEOUT 2> "$output_dir/vhost_autofuzz_output1.txt" &
fuzzpid=$!
waitforlisten $fuzzpid $FUZZ_RPC_SOCK


@ -78,8 +78,8 @@ function iscsitestinit() {
function waitforiscsidevices() {
local num=$1
for ((i=1; i<=20; i++)); do
n=$( iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true)
for ((i = 1; i <= 20; i++)); do
n=$(iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true)
if [ $n -ne $num ]; then
sleep 0.1
else
@ -107,7 +107,7 @@ function start_vpp() {
# On the VPP side the maximal MTU for TCP is 1460 and the tests don't work
# reliably with larger packets
MTU=1460
MTU_W_HEADER=$((MTU+20))
MTU_W_HEADER=$((MTU + 20))
ip link set dev $INITIATOR_INTERFACE mtu $MTU
ethtool -K $INITIATOR_INTERFACE tso off
ethtool -k $INITIATOR_INTERFACE
@ -131,13 +131,13 @@ function start_vpp() {
# Wait until VPP starts responding
xtrace_disable
counter=40
while [ $counter -gt 0 ] ; do
while [ $counter -gt 0 ]; do
vppctl show version | grep -E "vpp v[0-9]+\.[0-9]+" && break
counter=$(( counter - 1 ))
counter=$((counter - 1))
sleep 0.5
done
xtrace_restore
if [ $counter -eq 0 ] ; then
if [ $counter -eq 0 ]; then
return 1
fi
@ -171,8 +171,8 @@ function start_vpp() {
sleep 3
# SC1010: in "ping -M do" the word "do" is an option argument, not the bash keyword
# shellcheck disable=SC1010
ping -c 1 $TARGET_IP -s $(( MTU - 28 )) -M do
vppctl ping $INITIATOR_IP repeat 1 size $(( MTU - (28 + 8) )) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP"
ping -c 1 $TARGET_IP -s $((MTU - 28)) -M do
vppctl ping $INITIATOR_IP repeat 1 size $((MTU - (28 + 8))) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP"
}
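A short note on the header arithmetic used above, with the same 1460-byte MTU as an assumed input: 20 bytes is the IPv4 header and 28 bytes is the IPv4 header plus the 8-byte ICMP header.

MTU=1460
echo $((MTU + 20)) # 1480: MTU plus an IPv4 header, the value grepped for in the vppctl ping output
echo $((MTU - 28)) # 1432: ICMP payload size so that payload + ICMP + IPv4 headers equal the MTU exactly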
function kill_vpp() {
@ -187,7 +187,7 @@ function kill_vpp() {
}
function initiator_json_config() {
# Prepare config file for iSCSI initiator
jq . <<-JSON
jq . <<- JSON
{
"subsystems": [
{


@ -69,7 +69,7 @@ echo "Error injection test done"
if [ -z "$NO_NVME" ]; then
bdev_size=$(get_bdev_size Nvme0n1)
split_size=$((bdev_size/2))
split_size=$((bdev_size / 2))
if [ $split_size -gt 10000 ]; then
split_size=10000
fi


@ -75,7 +75,7 @@ parted -s /dev/$dev mklabel msdos
parted -s /dev/$dev mkpart primary '0%' '100%'
sleep 1
function filesystem_test {
function filesystem_test() {
fstype=$1
make_filesystem ${fstype} /dev/${dev}1


@ -82,7 +82,7 @@ $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$malloc_bdevs"
bdev=$( $rpc_py bdev_malloc_create 1024 512 )
bdev=$($rpc_py bdev_malloc_create 1024 512)
# "raid0:0" ==> use raid0 blockdev for LUN0
# "1:2" ==> map PortalGroup1 to InitiatorGroup2
# "64" ==> iSCSI queue depth 64
@ -131,7 +131,6 @@ $rpc_py bdev_malloc_delete ${bdev}
fio_status=0
wait $fio_pid || fio_status=$?
if [ $fio_status -eq 0 ]; then
echo "iscsi hotplug test: fio successful - expected failure"
exit 1


@ -49,7 +49,7 @@ sleep 1
trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t 30 2>$output_dir/iscsi_autofuzz_logs.txt
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t 30 2> $output_dir/iscsi_autofuzz_logs.txt
$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'


@ -47,12 +47,12 @@ function rpc_validate_ip() {
echo "Add new IP succeeded."
else
echo "Add new IP failed. Expected to succeed..."
exit 1;
exit 1
fi
# Add same IP again
if $cmd; then
echo "Same IP existed. Expected to fail..."
exit 1;
exit 1
fi
cmd="$rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS"
@ -60,12 +60,12 @@ function rpc_validate_ip() {
echo "Delete existing IP succeeded."
else
echo "Delete existing IP failed. Expected to succeed..."
exit 1;
exit 1
fi
# Delete same IP again
if $cmd; then
echo "No required IP existed. Expected to fail..."
exit 1;
exit 1
fi
}


@ -69,7 +69,7 @@ sleep 1
timing_enter discovery
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
waitforiscsidevices $(( NUM_LVS * NUM_LVOL ))
waitforiscsidevices $((NUM_LVS * NUM_LVOL))
timing_exit discovery
timing_enter fio


@ -11,7 +11,7 @@ if [ ! -x $FIO_PATH/fio ]; then
error "Invalid path of fio binary"
fi
function run_spdk_iscsi_fio(){
function run_spdk_iscsi_fio() {
$FIO_PATH/fio $testdir/perf.job "$@" --output-format=json
}
@ -23,8 +23,7 @@ iscsiadm -m node --login -p $IP_T:$ISCSI_PORT
waitforiscsidevices 1
disks=($(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}'))
for (( i=0; i < ${#disks[@]}; i++ ))
do
for ((i = 0; i < ${#disks[@]}; i++)); do
filename+=$(printf /dev/%s: "${disks[i]}")
waitforfile $filename
echo noop > /sys/block/${disks[i]}/queue/scheduler


@ -20,9 +20,11 @@ NUM_JOBS=1
ISCSI_TGT_CM=0x02
# Performance test for iscsi_tgt, run on devices with proper hardware support (target and initiator)
function usage()
{
[[ -n $2 ]] && ( echo "$2"; echo ""; )
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Usage: $(basename $1) [options]"
echo "-h, --help Print help and exit"
echo " --fiopath=PATH Path to fio directory on initiator. [default=$FIO_PATH]"
@ -37,18 +39,30 @@ while getopts 'h-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage $0; exit 0 ;;
help)
usage $0
exit 0
;;
fiopath=*) FIO_BIN="${OPTARG#*=}" ;;
disk_no=*) DISKNO="${OPTARG#*=}" ;;
target_ip=*) TARGET_IP="${OPTARG#*=}" ;;
initiator_ip=*) INITIATOR_IP="${OPTARG#*=}" ;;
init_mgmnt_ip=*) IP_I_SSH="${OPTARG#*=}" ;;
iscsi_tgt_mask=*) ISCSI_TGT_CM="${OPTARG#*=}" ;;
*) usage $0 echo "Invalid argument '$OPTARG'"; exit 1 ;;
*)
usage $0 echo "Invalid argument '$OPTARG'"
exit 1
;;
esac
;;
h) usage $0; exit 0 ;;
*) usage $0 "Invalid argument '$optchar'"; exit 1 ;;
h)
usage $0
exit 0
;;
*)
usage $0 "Invalid argument '$optchar'"
exit 1
;;
esac
done
@ -68,7 +82,7 @@ if [ $EUID -ne 0 ]; then
error "INFO: This script must be run with root privileges"
fi
function ssh_initiator(){
function ssh_initiator() {
ssh -i $HOME/.ssh/spdk_vhost_id_rsa root@$IP_I_SSH "$@"
}
@ -100,8 +114,7 @@ fi
$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
for (( i=0; i < DISKNO; i++ ))
do
for ((i = 0; i < DISKNO; i++)); do
$rpc_py iscsi_create_target_node Target${i} Target${i}_alias "${bdevs[i]}:0" "$PORTAL_TAG:$INITIATOR_TAG" 64 -d
done
@ -110,7 +123,7 @@ rm -f $testdir/perf.job
timing_exit iscsi_config
timing_enter iscsi_initiator
ssh_initiator bash -s - < $testdir/iscsi_initiator.sh $FIO_PATH $TARGET_IP
ssh_initiator bash -s - $FIO_PATH $TARGET_IP < $testdir/iscsi_initiator.sh
timing_exit iscsi_initiator
ssh_initiator "cat perf_output/iscsi_fio.json" > $iscsi_fio_results


@ -28,16 +28,16 @@ function run_fio() {
end_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")
end_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats")
IOPS_RESULT=$(((end_io_count-start_io_count)/run_time))
BANDWIDTH_RESULT=$(((end_bytes_read-start_bytes_read)/run_time))
IOPS_RESULT=$(((end_io_count - start_io_count) / run_time))
BANDWIDTH_RESULT=$(((end_bytes_read - start_bytes_read) / run_time))
}
function verify_qos_limits() {
local result=$1
local limit=$2
[ "$(bc <<< "$result > $limit*0.85")" -eq 1 ] && \
[ "$(bc <<< "$result < $limit*1.05")" -eq 1 ]
[ "$(bc <<< "$result > $limit*0.85")" -eq 1 ] \
&& [ "$(bc <<< "$result < $limit*1.05")" -eq 1 ]
}
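A hypothetical call to illustrate the bc-based tolerance check above: an observed value passes only when it falls between 85% and 105% of the configured limit.

verify_qos_limits 950 1000 && echo "within tolerance" # 950 is inside the 850-1050 window
verify_qos_limits 800 1000 || echo "out of tolerance" # 800 falls below 85% of the limit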
if [ -z "$TARGET_IP" ]; then
@ -87,19 +87,19 @@ trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTER
run_fio Malloc0
# Set IOPS/bandwidth limit to 50% of the actual unrestrained performance.
IOPS_LIMIT=$((IOPS_RESULT/2))
BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT/2))
IOPS_LIMIT=$((IOPS_RESULT / 2))
BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT / 2))
# Set READ bandwidth limit to 50% of the RW bandwidth limit to be able
# to differentiate those two.
READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT/2))
READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT / 2))
# Also round them down to the nearest multiple of either 1000 IOPS or 1 MB BW,
# which are the minimal QoS granularities
IOPS_LIMIT=$((IOPS_LIMIT/1000*1000))
BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT/1024/1024))
BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB*1024*1024))
READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT/1024/1024))
READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB*1024*1024))
IOPS_LIMIT=$((IOPS_LIMIT / 1000 * 1000))
BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT / 1024 / 1024))
BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB * 1024 * 1024))
READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT / 1024 / 1024))
READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB * 1024 * 1024))
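A worked example of the limit derivation above with hypothetical measurements (47342 IOPS and 193 MiB/s unrestrained), showing the halving and the rounding to 1000-IOPS and 1-MiB granularity.

IOPS_RESULT=47342
BANDWIDTH_RESULT=$((193 * 1024 * 1024))
IOPS_LIMIT=$((IOPS_RESULT / 2 / 1000 * 1000))              # 23000 IOPS
BANDWIDTH_LIMIT_MB=$((BANDWIDTH_RESULT / 2 / 1024 / 1024)) # 96 MiB
BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB * 1024 * 1024))      # 100663296 bytes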
# Limit the I/O rate by RPC, then confirm the observed rate matches.
$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT


@ -37,10 +37,10 @@ rbd_bdev="$($rpc_py bdev_rbd_create $RBD_POOL $RBD_NAME 4096)"
$rpc_py bdev_get_bdevs
$rpc_py bdev_rbd_resize $rbd_bdev 2000
num_block=$($rpc_py bdev_get_bdevs|grep num_blocks|sed 's/[^[:digit:]]//g')
num_block=$($rpc_py bdev_get_bdevs | grep num_blocks | sed 's/[^[:digit:]]//g')
# get the bdev size in MiB.
total_size=$(( num_block * 4096/ 1048576 ))
if [ $total_size != 2000 ];then
total_size=$((num_block * 4096 / 1048576))
if [ $total_size != 2000 ]; then
echo "resize failed."
exit 1
fi


@ -8,13 +8,13 @@ source $rootdir/test/iscsi_tgt/common.sh
function waitfortcp() {
local addr="$2"
if hash ip &>/dev/null; then
if hash ip &> /dev/null; then
local have_ip_cmd=true
else
local have_ip_cmd=false
fi
if hash ss &>/dev/null; then
if hash ss &> /dev/null; then
local have_ss_cmd=true
else
local have_ss_cmd=false
@ -25,7 +25,7 @@ function waitfortcp() {
xtrace_disable
local ret=0
local i
for (( i = 40; i != 0; i-- )); do
for ((i = 40; i != 0; i--)); do
# if the process is no longer running, then exit the script
# since it means the application crashed
if ! kill -s 0 $1; then
@ -55,7 +55,7 @@ function waitfortcp() {
done
xtrace_restore
if (( i == 0 )); then
if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$addr'"
ret=1
fi
@ -94,14 +94,15 @@ timing_enter sock_client
echo "Testing client path"
# start echo server using socat
$SOCAT_APP tcp-l:$ISCSI_PORT,fork,bind=$INITIATOR_IP exec:'/bin/cat' & server_pid=$!
$SOCAT_APP tcp-l:$ISCSI_PORT,fork,bind=$INITIATOR_IP exec:'/bin/cat' &
server_pid=$!
trap 'killprocess $server_pid;iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
waitfortcp $server_pid $INITIATOR_IP:$ISCSI_PORT
# send message using hello_sock client
message="**MESSAGE:This is a test message from the client**"
response=$( echo $message | $HELLO_SOCK_APP -H $INITIATOR_IP -P $ISCSI_PORT -N $TEST_TYPE)
response=$(echo $message | $HELLO_SOCK_APP -H $INITIATOR_IP -P $ISCSI_PORT -N $TEST_TYPE)
if ! echo "$response" | grep -q "$message"; then
exit 1
@ -120,13 +121,14 @@ timing_exit sock_client
timing_enter sock_server
# start echo server using hello_sock echo server
$HELLO_SOCK_APP -H $TARGET_IP -P $ISCSI_PORT -S -N $TEST_TYPE & server_pid=$!
$HELLO_SOCK_APP -H $TARGET_IP -P $ISCSI_PORT -S -N $TEST_TYPE &
server_pid=$!
trap 'killprocess $server_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
waitforlisten $server_pid
# send message to server using socat
message="**MESSAGE:This is a test message to the server**"
response=$( echo $message | $SOCAT_APP - tcp:$TARGET_IP:$ISCSI_PORT 2>/dev/null )
response=$(echo $message | $SOCAT_APP - tcp:$TARGET_IP:$ISCSI_PORT 2> /dev/null)
if [ "$message" != "$response" ]; then
exit 1


@ -51,7 +51,7 @@ echo "iscsi_tgt is listening. Running tests..."
timing_exit start_iscsi_tgt
mkdir -p ${TRACE_TMP_FOLDER}
./app/trace_record/spdk_trace_record -s iscsi -p ${iscsi_pid} -f ${TRACE_RECORD_OUTPUT} -q 1>${TRACE_RECORD_NOTICE_LOG} &
./app/trace_record/spdk_trace_record -s iscsi -p ${iscsi_pid} -f ${TRACE_RECORD_OUTPUT} -q 1> ${TRACE_RECORD_NOTICE_LOG} &
record_pid=$!
echo "Trace record pid: $record_pid"
@ -71,7 +71,7 @@ sleep 1
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
waitforiscsidevices $(( CONNECTION_NUMBER + 1 ))
waitforiscsidevices $((CONNECTION_NUMBER + 1))
trap 'iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
@ -119,16 +119,16 @@ if [ $len_arr_record_num -ne $len_arr_trace_tool_num ]; then
fi
#trace entries num check
for i in $(seq 0 $((len_arr_record_num - 1))); do
if [ ${arr_record_num[$i]} -le ${NUM_TRACE_ENTRIES} ]; then
if [ ${arr_record_num[$i]} -le ${NUM_TRACE_ENTRIES} ]; then
echo "trace record test on iscsi: failure on inefficient entries number check"
set -e
exit 1
fi
if [ ${arr_record_num[$i]} -ne ${arr_trace_tool_num[$i]} ]; then
fi
if [ ${arr_record_num[$i]} -ne ${arr_trace_tool_num[$i]} ]; then
echo "trace record test on iscsi: failure on entries number check"
set -e
exit 1
fi
fi
done
trap - SIGINT SIGTERM EXIT


@ -14,13 +14,13 @@ if [[ $SPDK_TEST_VHOST -ne 1 && $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
echo " Setting SPDK_TEST_VHOST=1 for duration of current script."
fi
if (( SPDK_TEST_BLOCKDEV + \
SPDK_TEST_ISCSI +
SPDK_TEST_NVMF +
SPDK_TEST_VHOST +
SPDK_TEST_VHOST_INIT +
SPDK_TEST_PMDK +
SPDK_TEST_RBD == 0 )); then
if ((SPDK_TEST_BLOCKDEV + \
SPDK_TEST_ISCSI + \
SPDK_TEST_NVMF + \
SPDK_TEST_VHOST + \
SPDK_TEST_VHOST_INIT + \
SPDK_TEST_PMDK + \
SPDK_TEST_RBD == 0)); then
echo "WARNING: No tests are enabled so not running JSON configuration tests"
exit 0
fi
@ -74,7 +74,7 @@ function tgt_check_notifications() {
last_event_id=${event_line##*:}
# set rc=false in case of failure so all errors can be printed
if (( $# == 0 )); then
if (($# == 0)); then
echo "ERROR: got extra event: $event_line"
rc=false
continue
@ -88,7 +88,7 @@ function tgt_check_notifications() {
$rc
if (( $# != 0 )); then
if (($# != 0)); then
echo "ERROR: missing events:"
echo "$@"
return 1
@ -129,8 +129,8 @@ function json_config_test_shutdown_app() {
# spdk_kill_instance RPC will trigger ASAN
kill -SIGINT ${app_pid[$app]}
for (( i=0; i<30; i++ )); do
if ! kill -0 ${app_pid[$app]} 2>/dev/null; then
for ((i = 0; i < 30; i++)); do
if ! kill -0 ${app_pid[$app]} 2> /dev/null; then
app_pid[$app]=
break
fi
@ -152,7 +152,7 @@ function create_bdev_subsystem_config() {
if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
local lvol_store_base_bdev=Nvme0n1
if ! tgt_rpc get_bdevs --name ${lvol_store_base_bdev} >/dev/null; then
if ! tgt_rpc get_bdevs --name ${lvol_store_base_bdev} > /dev/null; then
if [[ $(uname -s) = Linux ]]; then
lvol_store_base_bdev=aio_disk
echo "WARNING: No NVMe drive found. Using '$lvol_store_base_bdev' instead."
@ -191,7 +191,7 @@ function create_bdev_subsystem_config() {
# This AIO bdev must be large enough to be used as LVOL store
dd if=/dev/zero of=/tmp/sample_aio bs=1024 count=102400
tgt_rpc bdev_aio_create /tmp/sample_aio aio_disk 1024
expected_notifications+=( bdev_register:aio_disk )
expected_notifications+=(bdev_register:aio_disk)
fi
# For LVOLs use split to check for proper order of initialization.
@ -231,13 +231,13 @@ function create_bdev_subsystem_config() {
rm -f $pmem_pool_file
tgt_rpc create_pmem_pool $pmem_pool_file 128 4096
tgt_rpc bdev_pmem_create -n pmem1 $pmem_pool_file
expected_notifications+=( bdev_register:pmem1 )
expected_notifications+=(bdev_register:pmem1)
fi
if [[ $SPDK_TEST_RBD -eq 1 ]]; then
rbd_setup 127.0.0.1
tgt_rpc bdev_rbd_create $RBD_POOL $RBD_NAME 4096
expected_notifications+=( bdev_register:Ceph0 )
expected_notifications+=(bdev_register:Ceph0)
fi
tgt_check_notifications "${expected_notifications[@]}"
@ -285,9 +285,9 @@ function create_vhost_subsystem_config() {
tgt_rpc vhost_create_blk_controller VhostBlkCtrlr0 MallocForVhost0p5
# FIXME: enable after vhost-nvme is properly implemented against the latest rte_vhost (DPDK 19.05+)
# tgt_rpc vhost_create_nvme_controller VhostNvmeCtrlr0 16
# tgt_rpc vhost_nvme_controller_add_ns VhostNvmeCtrlr0 MallocForVhost0p6
# FIXME: enable after vhost-nvme is properly implemented against the latest rte_vhost (DPDK 19.05+)
# tgt_rpc vhost_create_nvme_controller VhostNvmeCtrlr0 16
# tgt_rpc vhost_nvme_controller_add_ns VhostNvmeCtrlr0 MallocForVhost0p6
timing_exit "${FUNCNAME[0]}"
}
@ -331,9 +331,7 @@ function create_virtio_initiator_config() {
timing_exit "${FUNCNAME[0]}"
}
function json_config_test_init()
{
function json_config_test_init() {
timing_enter "${FUNCNAME[0]}"
timing_enter json_config_setup_target
@ -343,7 +341,7 @@ function json_config_test_init()
# Load nvme configuration. The load_config will issue framework_start_init automatically
(
echo '{"subsystems": [';
echo '{"subsystems": ['
$rootdir/scripts/gen_nvme.sh --json | jq -r "del(.config[] | select(.params.name!=\"Nvme0\"))"
echo ']}'
) | tgt_rpc load_config
@ -412,13 +410,13 @@ function json_config_clear() {
# Because of that the configuration may not be fully cleaned up at this moment and
# we should wait a while. (See github issue #789)
count=100
while [ $count -gt 0 ] ; do
while [ $count -gt 0 ]; do
$rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break
count=$(( count -1 ))
count=$((count - 1))
sleep 0.1
done
if [ $count -eq 0 ] ; then
if [ $count -eq 0 ]; then
return 1
fi
}
@ -465,7 +463,7 @@ fi
echo "INFO: changing configuration and checking if this can be detected..."
# Self test to check if configuration diff can be detected.
tgt_rpc bdev_malloc_delete MallocBdevForConfigChangeCheck
if $rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}" >/dev/null; then
if $rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}" > /dev/null; then
echo "ERROR: intentional configuration difference not detected!"
false
else


@ -31,7 +31,7 @@ function test_construct_lvs() {
[ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
[ "$(jq -r '.[0].free_clusters' <<< "$lvs")" = "$total_clusters" ]
[ "$(( total_clusters * cluster_size ))" = "$LVS_DEFAULT_CAPACITY" ]
[ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
# remove the lvs and verify it's gone
rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
@ -100,11 +100,11 @@ function test_construct_lvs_different_cluster_size() {
# use the second malloc for some more lvs creation negative tests
malloc2_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
# capacity bigger than malloc's
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $(( MALLOC_SIZE + 1 )) && false
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE + 1)) && false
# capacity equal to malloc's (no space left for metadata)
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $MALLOC_SIZE && false
# capacity smaller than malloc's, but still no space left for metadata
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $(( MALLOC_SIZE - 1 )) && false
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE - 1)) && false
# cluster size smaller than the minimum (8192)
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c 8191 && false
@ -154,7 +154,7 @@ function test_construct_lvs_clear_methods() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -184,7 +184,7 @@ function test_construct_lvol_fio_clear_method_none() {
-c "$clear_method" \
-u "$lvstore_uuid" \
"$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 )))
$((jq_out["cluster_size"] / 1024 ** 2)))
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" write 0xdd
@ -203,12 +203,12 @@ function test_construct_lvol_fio_clear_method_none() {
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS ))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS ))
last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata ))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] ))
size_metadata_end=$(( offset - offset_metadata_end ))
last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
size_metadata_end=$((offset - offset_metadata_end))
# Check if data in the area between end of metadata and first cluster of lvol bdev remained unchanged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00
@ -233,7 +233,7 @@ function test_construct_lvol_fio_clear_method_unmap() {
malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
run_fio_test "$nbd_name" 0 $(( 256 * 1024**2 )) write 0xdd
run_fio_test "$nbd_name" 0 $((256 * 1024 ** 2)) write 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore --clear-method none "$malloc_dev" "$lvstore_name")
@ -243,7 +243,7 @@ function test_construct_lvol_fio_clear_method_unmap() {
-c "$clear_method" \
-u "$lvstore_uuid" \
"$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 )))
$((jq_out["cluster_size"] / 1024 ** 2)))
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" read 0xdd
@ -262,12 +262,12 @@ function test_construct_lvol_fio_clear_method_unmap() {
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS ))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS ))
last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata ))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] ))
size_metadata_end=$(( offset - offset_metadata_end ))
last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
size_metadata_end=$((offset - offset_metadata_end))
# Check if data in the area between end of metadata and first cluster of lvol bdev remained unchanged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd
@ -294,7 +294,7 @@ function test_construct_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
# clean up and create another lvol, this time use lvs alias instead of uuid
@ -307,7 +307,7 @@ function test_construct_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
# clean up
@ -326,10 +326,10 @@ function test_construct_multi_lvols() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# create 4 lvols
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB / 4 ))
lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB / 4))
# round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$((lvol_size_mb * 1024 * 1024))
for i in $(seq 1 4); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -338,7 +338,7 @@ function test_construct_multi_lvols() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
done
lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -361,7 +361,7 @@ function test_construct_multi_lvols() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
done
lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -394,13 +394,13 @@ function test_construct_lvols_conflict_alias() {
lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
# use a different size for second malloc to keep those differentiable
malloc2_size_mb=$(( MALLOC_SIZE_MB / 2 ))
malloc2_size_mb=$((MALLOC_SIZE_MB / 2))
# create an lvol store 2
malloc2_name=$(rpc_cmd bdev_malloc_create $malloc2_size_mb $MALLOC_BS)
lvs2_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs_test2)
lvol2_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol2_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
# create an lvol on lvs2
lvol2_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test2 lvol_test "$lvol2_size_mb")
@ -474,7 +474,7 @@ function test_construct_lvol_alias_conflict() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# create valid lvol
lvol_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$lvol_size_mb")
lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
@ -500,8 +500,8 @@ function test_construct_nested_lvol() {
# create a nested lvs
nested_lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$lvol_uuid" nested_lvs)
nested_lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB - LVS_DEFAULT_CLUSTER_SIZE_MB ))
nested_lvol_size=$(( nested_lvol_size_mb * 1024 * 1024 ))
nested_lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
nested_lvol_size=$((nested_lvol_size_mb * 1024 * 1024))
# create a nested lvol
nested_lvol1_uuid=$(rpc_cmd bdev_lvol_create -u "$nested_lvs_uuid" nested_lvol1 "$nested_lvol_size_mb")
@ -511,7 +511,7 @@ function test_construct_nested_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$nested_lvol1")" = "$nested_lvol1_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$nested_lvol1")" = "nested_lvs/nested_lvol1" ]
[ "$(jq -r '.[0].block_size' <<< "$nested_lvol1")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$nested_lvol1")" = "$(( nested_lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$nested_lvol1")" = "$((nested_lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$nested_lvol1")" = "$nested_lvs_uuid" ]
# try to create another nested lvol on a lvs that's already full


@ -3,10 +3,10 @@ MALLOC_BS=512
AIO_SIZE_MB=400
AIO_BS=4096
LVS_DEFAULT_CLUSTER_SIZE_MB=4
LVS_DEFAULT_CLUSTER_SIZE=$(( LVS_DEFAULT_CLUSTER_SIZE_MB * 1024 * 1024 ))
LVS_DEFAULT_CLUSTER_SIZE=$((LVS_DEFAULT_CLUSTER_SIZE_MB * 1024 * 1024))
# reserve some MBs for lvolstore metadata
LVS_DEFAULT_CAPACITY_MB=$(( MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB ))
LVS_DEFAULT_CAPACITY=$(( LVS_DEFAULT_CAPACITY_MB * 1024 * 1024 ))
LVS_DEFAULT_CAPACITY_MB=$((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
LVS_DEFAULT_CAPACITY=$((LVS_DEFAULT_CAPACITY_MB * 1024 * 1024))
function get_bdev_jq() {
rpc_cmd_simple_data_json bdev "$@"
@ -28,7 +28,7 @@ function round_down() {
if [ -n "$2" ]; then
CLUSTER_SIZE_MB=$2
fi
echo $(( $1 / CLUSTER_SIZE_MB * CLUSTER_SIZE_MB ))
echo $(($1 / CLUSTER_SIZE_MB * CLUSTER_SIZE_MB))
}
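Hypothetical calls to round_down, assuming the default cluster size of 4 MiB defined above:

round_down 31   # prints 28 - 31 MiB rounded down to a 4 MiB boundary
round_down 31 8 # prints 24 - same value with an explicit 8 MiB cluster size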
function run_fio_test() {


@ -38,7 +38,7 @@ function test_hotremove_lvol_store_multiple_lvols() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size
lvol_size_mb=$( round_down $(( (MALLOC_SIZE_MB- LVS_DEFAULT_CLUSTER_SIZE_MB) / 4 )) )
lvol_size_mb=$(round_down $(((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB) / 4)))
# create 4 lvols
for i in $(seq 1 4); do
@ -115,7 +115,7 @@ function test_bdev_lvol_delete_lvstore_with_clones() {
[[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 ))
size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")
@ -169,7 +169,7 @@ function test_unregister_lvol_bdev() {
[[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 ))
size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")


@ -13,8 +13,8 @@ function test_rename_positive() {
bdev_aliases=("lvs_test/lvol_test"{0..3})
# Calculate size and create two lvol bdevs on top
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
# Create 4 lvol bdevs on top of previously created lvol store
bdev_uuids=()
@ -23,7 +23,7 @@ function test_rename_positive() {
lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid)
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
bdev_uuids+=("$lvol_uuid")
done
@ -46,13 +46,13 @@ function test_rename_positive() {
cluster_size=$(jq -r '.[0].cluster_size' <<< "$lvs")
[ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
[ "$(( total_clusters * cluster_size ))" = "$LVS_DEFAULT_CAPACITY" ]
[ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
for i in "${!bdev_uuids[@]}"; do
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
done
@ -68,7 +68,7 @@ function test_rename_positive() {
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${new_bdev_aliases[i]}'"]')" ]
done
@ -104,8 +104,8 @@ function test_rename_lvs_negative() {
bdev_aliases_2=("lvs_test2/lvol_test_2_"{0..3})
# Calculate size and create two lvol bdevs on top
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
# Create 4 lvol bdevs on top of each lvol store
bdev_uuids_1=()
@ -115,7 +115,7 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
bdev_uuids_1+=("$lvol_uuid")
@ -123,7 +123,7 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
bdev_uuids_2+=("$lvol_uuid")
done
@ -148,13 +148,13 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_1[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_2[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
done
@ -181,8 +181,8 @@ function test_lvol_rename_negative() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate lvol bdev size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
# Create two lvol bdevs on top of previously created lvol store
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb")
@ -196,7 +196,7 @@ function test_lvol_rename_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid1)
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["lvs_test/lvol_test1"]')" ]
rpc_cmd bdev_lvol_delete lvs_test/lvol_test1


@ -13,8 +13,8 @@ function test_resize_lvol() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
# create an lvol on top
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
@ -23,28 +23,28 @@ function test_resize_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to twice its original size
lvol_size_mb=$(( lvol_size_mb * 2 ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to four times its original size, use its name instead of uuid
lvol_size_mb=$(( lvol_size_mb * 2 ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize lvs_test/lvol_test "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to 0 using lvol bdev alias
lvol_size_mb=0
lvol_size=0
rpc_cmd bdev_lvol_resize "lvs_test/lvol_test" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -70,13 +70,13 @@ function test_resize_lvol_negative() {
rpc_cmd bdev_lvol_resize "$dummy_uuid" 0 && false
# just make sure the size of the real lvol did not change
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# try to resize an lvol to a size bigger than lvs
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$MALLOC_SIZE_MB" && false
# just make sure the size of the real lvol did not change
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -93,8 +93,8 @@ function test_resize_lvol_with_io_traffic() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
# create an lvol on top
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
@ -103,26 +103,26 @@ function test_resize_lvol_with_io_traffic() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# prepare to do some I/O
trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
# write to the entire lvol
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE ))
count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
# writing beyond lvol size should fail
offset=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE + 1 ))
offset=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE + 1))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=$offset count=1 && false
# resize the lvol to twice its original size
lvol_size_mb=$(( lvol_size_mb * 2 ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# writing beyond the original lvol size should now succeed, we need
# to restart NBD though as it may still use the old, cached size
@ -137,7 +137,7 @@ function test_resize_lvol_with_io_traffic() {
# resize lvol down to a single cluster
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$LVS_DEFAULT_CLUSTER_SIZE_MB"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CLUSTER_SIZE / MALLOC_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CLUSTER_SIZE / MALLOC_BS))" ]
# make sure we can't write beyond the first cluster
trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
@ -168,7 +168,7 @@ function test_destroy_after_bdev_lvol_resize_positive() {
[[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
[[ ${jq_out["name"]} == "$lvstore_name" ]]
bdev_size=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )))
bdev_size=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$bdev_size")
# start resizing in the following fashion:
@ -180,19 +180,19 @@ function test_destroy_after_bdev_lvol_resize_positive() {
local resize
for resize in \
"$bdev_size" \
$(( bdev_size + 4 )) \
$(( bdev_size * 2 )) \
$(( bdev_size * 3 )) \
$(( bdev_size * 4 - 4 )) \
$((bdev_size + 4)) \
$((bdev_size * 2)) \
$((bdev_size * 3)) \
$((bdev_size * 4 - 4)) \
0; do
resize=$(round_down $(( resize / 4 )))
resize=$(round_down $((resize / 4)))
rpc_cmd bdev_lvol_resize "$bdev_uuid" "$resize"
get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
[[ ${jq_out["name"]} == "$bdev_uuid" ]]
[[ ${jq_out["name"]} == "${jq_out["uuid"]}" ]]
(( jq_out["block_size"] == MALLOC_BS ))
(( jq_out["num_blocks"] * jq_out["block_size"] == resize * 1024**2 ))
((jq_out["block_size"] == MALLOC_BS))
((jq_out["num_blocks"] * jq_out["block_size"] == resize * 1024 ** 2))
done
# cleanup
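
The pattern in the hunk above repeats throughout this change: shfmt collapses `$(( expr ))` to `$((expr))`. A minimal sketch of checking a script against that style locally (the file path is hypothetical and the flags are plain shfmt defaults, not necessarily the exact invocation used for this change):
# before reformat: lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
# after  reformat: lvol_size=$((lvol_size_mb * 1024 * 1024))
shfmt -d test/lvol/resize.sh   # print a diff if the file is not formatted
shfmt -w test/lvol/resize.sh   # rewrite the file in place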


@ -11,20 +11,20 @@ function test_snapshot_compare_with_lvol_bdev() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create two lvol bdevs
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 6 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb")
# Fill thin provisioned lvol bdev with 50% of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2 ))
count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Fill whole thick provisioned lvol bdev
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd0
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE ))
count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -35,7 +35,7 @@ function test_snapshot_compare_with_lvol_bdev() {
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid1" /dev/nbd0
# Try to perform write operation on created snapshot
# Check if filling snapshot of lvol bdev fails
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE ))
count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count && false
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -52,7 +52,7 @@ function test_snapshot_compare_with_lvol_bdev() {
cmp "$lvol_nbd2" "$snapshot_nbd2"
# Fill second half of thin provisioned lvol bdev
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2 ))
count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
dd if=/dev/urandom of="$lvol_nbd1" oflag=direct seek=$count bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
# Compare thin provisioned lvol bdev with its snapshot and check if it fails
@ -77,7 +77,6 @@ function test_snapshot_compare_with_lvol_bdev() {
check_leftover_devices
}
# Check that when writing to lvol bdev
# creating snapshot ends with success
function test_create_snapshot_with_io() {
@ -85,8 +84,8 @@ function test_create_snapshot_with_io() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
@ -111,14 +110,13 @@ function test_create_snapshot_with_io() {
check_leftover_devices
}
# Check that creating snapshot of snapshot will fail
function test_create_snapshot_of_snapshot() {
malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 3 )) )
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 3)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -149,8 +147,8 @@ function test_clone_snapshot_relations() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 6 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -176,7 +174,7 @@ function test_clone_snapshot_relations() {
# Perform write operation to first clone
# Change first half of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid1" /dev/nbd0
fill_size=$(( lvol_size / 2 ))
fill_size=$((lvol_size / 2))
run_fio_test /dev/nbd0 0 $fill_size "write" "0xaa"
# Compare snapshot with second clone. Data on both bdevs should be the same
@ -229,14 +227,14 @@ function test_clone_inflate() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) )
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
# Fill lvol bdev with 100% of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
run_fio_test /dev/nbd0 0 $(( lvol_size_mb * 1024 * 1024 )) "write" "0xcc"
run_fio_test /dev/nbd0 0 $((lvol_size_mb * 1024 * 1024)) "write" "0xcc"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Create snapshots of lvol bdev
@ -249,9 +247,9 @@ function test_clone_inflate() {
# Fill part of clone with data of known pattern
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
first_fill=0
second_fill=$(( lvol_size_mb * 1024 * 1024 * 3 / 4 ))
run_fio_test /dev/nbd0 $first_fill $(( 1024 * 1024 )) "write" "0xdd"
run_fio_test /dev/nbd0 $second_fill $(( 1024 * 1024 )) "write" "0xdd"
second_fill=$((lvol_size_mb * 1024 * 1024 * 3 / 4))
run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "write" "0xdd"
run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "write" "0xdd"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Do inflate
@ -264,10 +262,10 @@ function test_clone_inflate() {
# Check data consistency
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
run_fio_test /dev/nbd0 $first_fill $(( 1024 * 1024 )) "read" "0xdd"
run_fio_test /dev/nbd0 $(( (first_fill + 1) * 1024 * 1024 )) $(( second_fill - 1024 * 1024 )) "read" "0xcc"
run_fio_test /dev/nbd0 $second_fill $(( 1024 * 1024 )) "read" "0xdd"
run_fio_test /dev/nbd0 $(( second_fill + 1024 * 1024 )) $(( lvol_size_mb * 1024 * 1024 - ( second_fill + 1024 * 1024 ) )) "read" "0xcc"
run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "read" "0xdd"
run_fio_test /dev/nbd0 $(((first_fill + 1) * 1024 * 1024)) $((second_fill - 1024 * 1024)) "read" "0xcc"
run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "read" "0xdd"
run_fio_test /dev/nbd0 $((second_fill + 1024 * 1024)) $((lvol_size_mb * 1024 * 1024 - (second_fill + 1024 * 1024))) "read" "0xcc"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Clean up
@ -285,7 +283,7 @@ function test_clone_decouple_parent() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$(( 5 * LVS_DEFAULT_CLUSTER_SIZE_MB ))
lvol_size_mb=$((5 * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -295,17 +293,17 @@ function test_clone_decouple_parent() {
# Fill first four out of 5 clusters of clone with data of known pattern
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
begin_fill=0
end_fill=$(( lvol_size_mb * 4 * 1024 * 1024 / 5 ))
end_fill=$((lvol_size_mb * 4 * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $begin_fill $end_fill "write" "0xdd"
# Create snapshot (snapshot<-lvol_bdev)
snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
# Fill second and fourth cluster of clone with data of known pattern
start_fill=$(( lvol_size_mb * 1024 * 1024 / 5 ))
start_fill=$((lvol_size_mb * 1024 * 1024 / 5))
fill_range=$start_fill
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
start_fill=$(( lvol_size_mb * 3 * 1024 * 1024 / 5 ))
start_fill=$((lvol_size_mb * 3 * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
# Create snapshot (snapshot<-snapshot2<-lvol_bdev)
@ -316,9 +314,9 @@ function test_clone_decouple_parent() {
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xee"
# Check data consistency
pattern=( "0xdd" "0xee" "0xdd" "0xcc" "0x00" )
pattern=("0xdd" "0xee" "0xdd" "0xcc" "0x00")
for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 ))
start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done
@ -341,7 +339,7 @@ function test_clone_decouple_parent() {
# Check data consistency
for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 ))
start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done
@ -361,7 +359,7 @@ function test_clone_decouple_parent() {
# Check data consistency
for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 ))
start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done
@ -378,7 +376,7 @@ function test_lvol_bdev_readonly() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -413,8 +411,8 @@ function test_delete_snapshot_with_clone() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -427,7 +425,7 @@ function test_delete_snapshot_with_clone() {
snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
# Fill first half of lvol bdev
half_size=$(( lvol_size / 2 - 1 ))
half_size=$((lvol_size / 2 - 1))
run_fio_test /dev/nbd0 0 $half_size "write" "0xee"
# Check if snapshot was unchanged
@ -447,7 +445,7 @@ function test_delete_snapshot_with_clone() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "false" ]
run_fio_test /dev/nbd0 0 $half_size "read" "0xee"
run_fio_test /dev/nbd0 $(( half_size + 1 )) $half_size "read" "0xcc"
run_fio_test /dev/nbd0 $((half_size + 1)) $half_size "read" "0xcc"
# Clean up
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -463,8 +461,8 @@ function test_delete_snapshot_with_snapshot() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 5 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 5)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -479,9 +477,9 @@ function test_delete_snapshot_with_snapshot() {
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
# Fill second 1/3 of lvol bdev
first_part=$(( lvol_size / 3 ))
second_part=$(( lvol_size * 2 / 3 ))
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "write" "0xee"
first_part=$((lvol_size / 3))
second_part=$((lvol_size * 2 / 3))
run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "write" "0xee"
# Check if snapshot was unchanged
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1
@ -502,22 +500,22 @@ function test_delete_snapshot_with_snapshot() {
# Verify snapshots
run_fio_test /dev/nbd1 0 $size "read" "0xcc"
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid2" /dev/nbd2
run_fio_test /dev/nbd2 0 $(( first_part - 1 )) "read" "0xcc"
run_fio_test /dev/nbd2 $first_part $(( second_part - first_part )) "read" "0xee"
run_fio_test /dev/nbd2 $second_part $(( lvol_size - second_part )) "read" "0xcc"
run_fio_test /dev/nbd2 0 $((first_part - 1)) "read" "0xcc"
run_fio_test /dev/nbd2 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd2 $second_part $((lvol_size - second_part)) "read" "0xcc"
# Verify lvol bdev
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xcc"
run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xcc"
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot2"' ]
# Fill third part of lvol bdev
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "write" "0xdd"
run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "write" "0xdd"
# Verify snapshots
run_fio_test /dev/nbd1 0 $size "read" "0xcc"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xdd"
run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd2
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
@ -530,8 +528,8 @@ function test_delete_snapshot_with_snapshot() {
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
[ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot")" = "$(jq '.|sort' <<< '["lvol_test"]')" ]
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xdd"
run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
# Clean up
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -561,7 +559,7 @@ function test_bdev_lvol_delete_ordering() {
[[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 ))
size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
bdev_uuid=$(rpc_cmd bdev_lvol_create -t -u "$lvstore_uuid" "$lbd_name" "$size")
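
The `size` line above uses bash's integer exponent operator inside arithmetic expansion (now spaced as `1024 ** 2` by shfmt); a standalone sketch with made-up cluster numbers:
free_clusters=63
cluster_size=$((4 * 1024 * 1024))                       # hypothetical 4 MiB clusters
size=$((free_clusters * cluster_size / 4 / 1024 ** 2))  # quarter of the free space, in MiB
echo "$size"                                            # prints 63 with these values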


@ -24,14 +24,14 @@ function test_tasting() {
rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
# Create a valid lvs
lvs1_cluster_size=$(( 1 * 1024 * 1024 ))
lvs2_cluster_size=$(( 32 * 1024 * 1024 ))
lvs1_cluster_size=$((1 * 1024 * 1024))
lvs2_cluster_size=$((32 * 1024 * 1024))
lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test1 -c $lvs1_cluster_size)
lvs_uuid2=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev1 lvs_test2 -c $lvs2_cluster_size)
# Create 5 lvols on first lvs
lvol_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 10 )))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 10)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
for i in $(seq 1 5); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb")
@ -41,12 +41,12 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / AIO_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
done
# Create 5 lvols on second lvs
lvol2_size_mb=$(round_down $(( ( AIO_SIZE_MB - 16 ) / 5 )) 32)
lvol2_size=$(( lvol2_size_mb * 1024 * 1024 ))
lvol2_size_mb=$(round_down $(((AIO_SIZE_MB - 16) / 5)) 32)
lvol2_size=$((lvol2_size_mb * 1024 * 1024))
for i in $(seq 1 5); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid2" "lvol_test${i}" "$lvol2_size_mb")
@ -56,7 +56,7 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test2/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol2_size / AIO_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol2_size / AIO_BS))" ]
done
old_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -78,11 +78,11 @@ function test_tasting() {
new_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
[ "$(jq length <<< "$new_lvols")" == "10" ]
new_lvs=$(rpc_cmd bdev_lvol_get_lvstores | jq .)
if ! diff <(jq '. | sort' <<<"$old_lvs") <(jq '. | sort' <<<"$new_lvs"); then
if ! diff <(jq '. | sort' <<< "$old_lvs") <(jq '. | sort' <<< "$new_lvs"); then
echo "ERROR: old and loaded lvol store is not the same"
return 1
fi
if ! diff <(jq '. | sort' <<<"$old_lvols") <(jq '. | sort' <<<"$new_lvols"); then
if ! diff <(jq '. | sort' <<< "$old_lvols") <(jq '. | sort' <<< "$new_lvols"); then
echo "ERROR: old and loaded lvols are not the same"
return 1
fi
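
The comparison above relies on process substitution plus `jq '. | sort'` so that ordering differences do not count as a mismatch; a minimal sketch of the same idiom with throwaway JSON:
old='[{"name":"lvs_test2"},{"name":"lvs_test1"}]'
new='[{"name":"lvs_test1"},{"name":"lvs_test2"}]'
# both arrays sort to the same document, so diff exits 0
diff <(jq '. | sort' <<< "$old") <(jq '. | sort' <<< "$new") && echo "lvol stores match"
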
@ -96,7 +96,7 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / AIO_BS ))" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
done
for i in $(seq 1 10); do
@ -132,7 +132,7 @@ function test_delete_lvol_store_persistent_positive() {
get_bdev_jq bdev_get_bdevs -b "$bdev_aio_name"
[[ ${jq_out["name"]} == "$bdev_aio_name" ]]
[[ ${jq_out["product_name"]} == "AIO disk" ]]
(( jq_out["block_size"] == bdev_block_size ))
((jq_out["block_size"] == bdev_block_size))
lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$bdev_aio_name" "$lvstore_name")


@ -15,7 +15,7 @@ function test_thin_lvol_check_space() {
free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
# Create thin provisioned lvol bdev with size equal to the lvol store space
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) )
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
@ -28,31 +28,31 @@ function test_thin_lvol_check_space() {
run_fio_test /dev/nbd0 0 $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_first_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_first_fio + 1 )) == $free_clusters_start ]
[ $((free_clusters_first_fio + 1)) == $free_clusters_start ]
# Write data (lvs cluster size) to lvol bdev with offset set to one and a half of the cluster size
offset=$(( LVS_DEFAULT_CLUSTER_SIZE * 3 / 2 ))
offset=$((LVS_DEFAULT_CLUSTER_SIZE * 3 / 2))
size=$LVS_DEFAULT_CLUSTER_SIZE
run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_second_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_second_fio + 3 )) == $free_clusters_start ]
[ $((free_clusters_second_fio + 3)) == $free_clusters_start ]
# write data to lvol bdev to the end of its size
size=$(( LVS_DEFAULT_CLUSTER_SIZE * free_clusters_first_fio ))
offset=$(( 3 * LVS_DEFAULT_CLUSTER_SIZE ))
size=$((LVS_DEFAULT_CLUSTER_SIZE * free_clusters_first_fio))
offset=$((3 * LVS_DEFAULT_CLUSTER_SIZE))
run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
# Check that lvol store free clusters number equals to 0
free_clusters_third_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_third_fio )) == 0 ]
[ $((free_clusters_third_fio)) == 0 ]
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
rpc_cmd bdev_lvol_delete "$lvol_uuid"
rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_end="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_end )) == $free_clusters_start ]
[ $((free_clusters_end)) == $free_clusters_start ]
# Clean up
rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
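
The free-cluster checks above follow from how a thin lvol allocates whole clusters: a cluster-sized write at offset 1.5 clusters touches two clusters, so three are consumed in total after the first two writes. A quick arithmetic sketch with a hypothetical 4 MiB cluster size:
cluster_size=$((4 * 1024 * 1024))
offset=$((cluster_size * 3 / 2))                      # start in the middle of cluster 1
first=$((offset / cluster_size))                      # -> 1
last=$(((offset + cluster_size - 1) / cluster_size))  # -> 2
echo "write touches clusters $first..$last"           # two new clusters besides cluster 0
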
@ -71,10 +71,10 @@ function test_thin_lvol_check_zeroes() {
# Create thick and thin provisioned lvol bdevs with size equal to the lvol store space
lbd_name0=lvol_test0
lbd_name1=lvol_test1
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB ))
lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
# Round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid0=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name0 "$lvol_size_mb")
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name1 "$lvol_size_mb" -t)
@ -104,10 +104,10 @@ function test_thin_lvol_check_integrity() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create thin provisioned lvol bdev with size equal to the lvol store space
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB ))
lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
# Round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB ))
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
@ -127,8 +127,8 @@ function test_thin_lvol_resize() {
# Construct thin provisioned lvol bdevs on created lvol store
# with size equal to 50% of lvol store
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
# Fill all free space of lvol bdev with data
@ -140,15 +140,15 @@ function test_thin_lvol_resize() {
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
# Resize bdev to full size of lvs
lvol_size_full_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) )
lvol_size_full=$(( lvol_size_full_mb * 1024 * 1024 ))
lvol_size_full_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_size_full=$((lvol_size_full_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_full_mb
# Check if bdev size changed (total_data_clusters*cluster_size
# equals num_blocks*block_size)
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = $(( lvol_size_full / MALLOC_BS )) ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = $((lvol_size_full / MALLOC_BS)) ]
# Check if free_clusters on lvs remain unaffected
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
@ -167,13 +167,13 @@ function test_thin_lvol_resize() {
[ $free_clusters_start == 0 ]
# Resize bdev to 25% of lvs and check if it ended with success
lvol_size_quarter_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) )
lvol_size_quarter_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_quarter_mb
# Check free clusters on lvs
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_resize_quarter="$(jq -r '.[0].free_clusters' <<< "$lvs")"
free_clusters_expected=$(( (lvol_size_full_mb - lvol_size_quarter_mb) / LVS_DEFAULT_CLUSTER_SIZE_MB ))
free_clusters_expected=$(((lvol_size_full_mb - lvol_size_quarter_mb) / LVS_DEFAULT_CLUSTER_SIZE_MB))
[ $free_clusters_resize_quarter == $free_clusters_expected ]
rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -187,16 +187,16 @@ function test_thin_overprovisioning() {
# Construct two thin provisioned lvol bdevs on created lvol store
# with size equal to free lvol store size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) )
lvol_size=$(( lvol_size_mb * 1024 * 1024 ))
lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb" -t)
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd1
# Fill first bdev to 50% of its space with specific pattern
fill_size=$(( lvol_size_mb * 5 / 10 / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB ))
fill_size=$(( fill_size * 1024 * 1024))
fill_size=$((lvol_size_mb * 5 / 10 / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
fill_size=$((fill_size * 1024 * 1024))
run_fio_test /dev/nbd0 0 $fill_size "write" "0xcc"
# Fill second bdev up to 50% of its space
@ -205,7 +205,7 @@ function test_thin_overprovisioning() {
# Fill rest of second bdev
# Check that an error message occurred while filling the second bdev with data
offset=$fill_size
fill_size_rest=$(( lvol_size - fill_size ))
fill_size_rest=$((lvol_size - fill_size))
run_fio_test /dev/nbd1 "$offset" "$fill_size_rest" "write" "0xcc" && false
# Check if data on first disk stayed unchanged


@ -26,7 +26,7 @@ function confirm_abi_deps() {
return 1
fi
cat <<EOF > ${suppression_file}
cat << EOF > ${suppression_file}
[suppress_variable]
name = SPDK_LOG_BDEV
[suppress_variable]
@ -107,7 +107,7 @@ EOF
continue
fi
processed_so=$((processed_so+1))
processed_so=$((processed_so + 1))
done
rm -f $suppression_file
echo "Processed $processed_so objects."
@ -131,7 +131,7 @@ function replace_defined_variables() {
for dep in "${bad_values[@]}"; do
dep_def_arr=($(grep -v "#" $libdeps_file | grep "${dep}" | cut -d "=" -f 2 | xargs))
new_values=($(replace_defined_variables "${dep_def_arr[@]}"))
good_values=( "${good_values[@]}" "${new_values[@]}" )
good_values=("${good_values[@]}" "${new_values[@]}")
done
echo ${good_values[*]}
}
@ -175,9 +175,9 @@ function confirm_deps() {
done
IFS=$'\n'
# Ignore any event_* dependencies. Those are based on the subsystem configuration and not readelf.
lib_make_deps=( $(printf "%s\n" "${lib_make_deps[@]}" | sort | grep -v "event_") )
lib_make_deps=($(printf "%s\n" "${lib_make_deps[@]}" | sort | grep -v "event_"))
# Ignore the env_dpdk readelf dependency. We don't want people explicitly linking against it.
dep_names=( $(printf "%s\n" "${dep_names[@]}" | sort | uniq | grep -v "env_dpdk") )
dep_names=($(printf "%s\n" "${dep_names[@]}" | sort | uniq | grep -v "env_dpdk"))
unset IFS
diff=$(echo "${dep_names[@]}" "${lib_make_deps[@]}" | tr ' ' '\n' | sort | uniq -u)
if [ "$diff" != "" ]; then
@ -224,7 +224,10 @@ if grep -q 'CONFIG_VHOST_INTERNAL_LIB?=n' $rootdir/mk/config.mk; then
IGNORED_LIBS+=("rte_vhost")
fi
( for lib in $SPDK_LIBS; do confirm_deps $lib & done; wait )
(
for lib in $SPDK_LIBS; do confirm_deps $lib & done
wait
)
$MAKE $MAKEFLAGS clean
git checkout "$rootdir/mk/spdk.lib.mk"
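
The `diff=$(... | sort | uniq -u)` line in confirm_deps above is a symmetric difference: after sorting, `uniq -u` keeps only entries that appear in exactly one of the two lists. A tiny sketch with invented library names:
dep_names=(log util json)        # what readelf reported
lib_make_deps=(log util thread)  # what the makefile declares
diff=$(echo "${dep_names[@]}" "${lib_make_deps[@]}" | tr ' ' '\n' | sort | uniq -u)
echo "$diff"                     # -> json and thread, i.e. the mismatched entries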


@ -110,7 +110,10 @@ ssh_vm 'echo ready'
timing_exit wait_for_vm
timing_enter copy_repo
(cd "$rootdir"; tar -cf - .) | (ssh_vm 'tar -xf -')
(
cd "$rootdir"
tar -cf - .
) | (ssh_vm 'tar -xf -')
timing_exit copy_repo
devices_initialization
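
The copy_repo step above streams a tar archive through a pipe instead of copying file by file; a minimal sketch of the same pattern (the host alias and directories are hypothetical):
src_dir=/tmp/spdk-checkout
(
        cd "$src_dir"
        tar -cf - .
) | ssh test-vm 'mkdir -p ~/spdk && cd ~/spdk && tar -xf -'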


@ -48,7 +48,7 @@ trap 'killprocess $example_pid; exit 1' SIGINT SIGTERM EXIT
i=0
while ! grep "Starting I/O" log.txt; do
[ $i -lt 20 ] || break
i=$((i+1))
i=$((i + 1))
sleep 1
done


@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
function nvme_identify {
function nvme_identify() {
$rootdir/examples/nvme/identify/identify -i 0
for bdf in $(get_nvme_bdfs); do
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0
@ -13,7 +13,7 @@ function nvme_identify {
timing_exit identify
}
function nvme_perf {
function nvme_perf() {
# enable no shutdown notification option
$rootdir/examples/nvme/perf/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -N
$rootdir/examples/nvme/perf/perf -q 128 -w write -o 12288 -t 1 -LL -i 0
@ -23,7 +23,7 @@ function nvme_perf {
fi
}
function nvme_fio_test {
function nvme_fio_test() {
PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
for bdf in $(get_nvme_bdfs); do
for blkname in $(get_nvme_name_from_bdf $bdf); do
@ -32,7 +32,7 @@ function nvme_fio_test {
done
}
function nvme_multi_secondary {
function nvme_multi_secondary() {
$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x1 &
pid0=$!
$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x2 &


@ -17,7 +17,7 @@ KERNEL_ENGINES=(
["kernel-libaio"]="--ioengine=libaio"
["kernel-classic-polling"]="--ioengine=pvsync2 --hipri=100"
["kernel-hybrid-polling"]="--ioengine=pvsync2 --hipri=100"
["kernel-io-uring"]="--ioengine=io_uring" )
["kernel-io-uring"]="--ioengine=io_uring")
RW=randrw
MIX=100
@ -45,27 +45,27 @@ function is_bdf_not_mounted() {
return $mountpoints
}
function get_cores(){
function get_cores() {
local cpu_list="$1"
for cpu in ${cpu_list//,/ }; do
echo $cpu
done
}
function get_cores_numa_node(){
function get_cores_numa_node() {
local cores=$1
for core in $cores; do
lscpu -p=cpu,node | grep "^$core\b" | awk -F ',' '{print $2}'
done
}
function get_numa_node(){
function get_numa_node() {
local plugin=$1
local disks=$2
if [[ "$plugin" =~ "nvme" ]]; then
for bdf in $disks; do
local driver
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
# Use this check to omit blacklisted devices (not bound to a driver by the setup.sh script)
if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
cat /sys/bus/pci/devices/$bdf/numa_node
@ -89,7 +89,7 @@ function get_numa_node(){
fi
}
function get_disks(){
function get_disks() {
local plugin=$1
if [[ "$plugin" =~ "nvme" ]]; then
for bdf in $(get_nvme_bdfs); do
@ -111,23 +111,22 @@ function get_disks(){
fi
}
function get_disks_on_numa(){
function get_disks_on_numa() {
local devs=($1)
local numas=($2)
local numa_no=$3
local disks_on_numa=""
local i
for (( i=0; i<${#devs[@]}; i++ ))
do
for ((i = 0; i < ${#devs[@]}; i++)); do
if [ ${numas[$i]} = $numa_no ]; then
disks_on_numa=$((disks_on_numa+1))
disks_on_numa=$((disks_on_numa + 1))
fi
done
echo $disks_on_numa
}
function create_fio_config(){
function create_fio_config() {
local disk_no=$1
local plugin=$2
local disks=($3)
@ -139,19 +138,17 @@ function create_fio_config(){
local cores_numa
cores_numa=($(get_cores_numa_node "$5"))
local disks_per_core=$((disk_no/no_cores))
local disks_per_core_mod=$((disk_no%no_cores))
local disks_per_core=$((disk_no / no_cores))
local disks_per_core_mod=$((disk_no % no_cores))
# For the kernel driver, each disk will be aligned with all cpus on the same NUMA node
if [[ "$plugin" =~ "kernel" ]]; then
for (( i=0; i<disk_no; i++ ))
do
for ((i = 0; i < disk_no; i++)); do
sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio
filename="/dev/${disks[$i]}"
sed -i -e "\$afilename=$filename" $BASE_DIR/config.fio
cpu_used=""
for (( j=0; j<no_cores; j++ ))
do
for ((j = 0; j < no_cores; j++)); do
core_numa=${cores_numa[$j]}
if [ "${disks_numa[$i]}" = "$core_numa" ]; then
cpu_used+="${cores[$j]},"
@ -161,13 +158,12 @@ function create_fio_config(){
echo "" >> $BASE_DIR/config.fio
done
else
for (( i=0; i<no_cores; i++ ))
do
for ((i = 0; i < no_cores; i++)); do
core_numa=${cores_numa[$i]}
total_disks_per_core=$disks_per_core
if [ "$disks_per_core_mod" -gt "0" ]; then
total_disks_per_core=$((disks_per_core+1))
disks_per_core_mod=$((disks_per_core_mod-1))
total_disks_per_core=$((disks_per_core + 1))
disks_per_core_mod=$((disks_per_core_mod - 1))
fi
if [ "$total_disks_per_core" = "0" ]; then
@ -181,7 +177,7 @@ function create_fio_config(){
n=0 #counter of all disks
while [ "$m" -lt "$total_disks_per_core" ]; do
if [ ${disks_numa[$n]} = $core_numa ]; then
m=$((m+1))
m=$((m + 1))
if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
filename='trtype=PCIe traddr='${disks[$n]//:/.}' ns=1'
elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then
@ -191,7 +187,7 @@ function create_fio_config(){
#Mark numa of n'th disk as "x" to mark it as claimed
disks_numa[$n]="x"
fi
n=$((n+1))
n=$((n + 1))
# If there are no more disks with the same numa node as the cpu numa node, switch to the other numa node.
if [ $n -ge $total_disks ]; then
if [ "$core_numa" = "1" ]; then
@ -231,9 +227,9 @@ function preconditioning() {
rm -f $BASE_DIR/config.fio
}
function get_results(){
function get_results() {
local reads_pct=$2
local writes_pct=$((100-$2))
local writes_pct=$((100 - $2))
case "$1" in
iops)
@ -244,42 +240,42 @@ function get_results(){
mean_lat_usec)
mean_lat=$(jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_lat=${mean_lat%.*}
echo $(( mean_lat/100000 ))
echo $((mean_lat / 100000))
;;
p99_lat_usec)
p99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.000000\" * $reads_pct + .write.clat_ns.percentile.\"99.000000\" * $writes_pct)" $NVME_FIO_RESULTS)
p99_lat=${p99_lat%.*}
echo $(( p99_lat/100000 ))
echo $((p99_lat / 100000))
;;
p99_99_lat_usec)
p99_99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.990000\" * $reads_pct + .write.clat_ns.percentile.\"99.990000\" * $writes_pct)" $NVME_FIO_RESULTS)
p99_99_lat=${p99_99_lat%.*}
echo $(( p99_99_lat/100000 ))
echo $((p99_99_lat / 100000))
;;
stdev_usec)
stdev=$(jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)" $NVME_FIO_RESULTS)
stdev=${stdev%.*}
echo $(( stdev/100000 ))
echo $((stdev / 100000))
;;
mean_slat_usec)
mean_slat=$(jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_slat=${mean_slat%.*}
echo $(( mean_slat/100000 ))
echo $((mean_slat / 100000))
;;
mean_clat_usec)
mean_clat=$(jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_clat=${mean_clat%.*}
echo $(( mean_clat/100000 ))
echo $((mean_clat / 100000))
;;
bw_Kibs)
bw=$(jq -r ".jobs[] | (.read.bw + .write.bw)" $NVME_FIO_RESULTS)
bw=${bw%.*}
echo $(( bw ))
echo $((bw))
;;
esac
}
function get_bdevperf_results(){
function get_bdevperf_results() {
case "$1" in
iops)
iops=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $2}')
@ -289,7 +285,7 @@ function get_bdevperf_results(){
bw_Kibs)
bw_MBs=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $4}')
bw_MBs=${bw_MBs%.*}
echo $(( bw_MBs * 1024 ))
echo $((bw_MBs * 1024))
;;
esac
}
@ -301,7 +297,7 @@ function get_nvmeperf_results() {
local max_lat_usec
local min_lat_usec
read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec<<< $(tr -s " " < $NVME_FIO_RESULTS | grep -oP "(?<=Total : )(.*+)")
read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec <<< $(tr -s " " < $NVME_FIO_RESULTS | grep -oP "(?<=Total : )(.*+)")
# We need to get rid of the decimal places due
# to the use of arithmetic expressions instead of "bc" for calculations
@ -314,27 +310,25 @@ function get_nvmeperf_results() {
echo "$iops $(bc <<< "$bw_MBs * 1024") $mean_lat_usec $min_lat_usec $max_lat_usec"
}
function run_spdk_nvme_fio(){
function run_spdk_nvme_fio() {
local plugin=$1
echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
LD_PRELOAD=$PLUGIN_DIR_NVME/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json\
"${@:2}" --ioengine=spdk
LD_PRELOAD=$PLUGIN_DIR_NVME/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json "${@:2}" --ioengine=spdk
elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then
LD_PRELOAD=$PLUGIN_DIR_BDEV/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json\
"${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$BASE_DIR/bdev.conf --spdk_mem=4096
LD_PRELOAD=$PLUGIN_DIR_BDEV/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json "${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$BASE_DIR/bdev.conf --spdk_mem=4096
fi
sleep 1
}
function run_nvme_fio(){
function run_nvme_fio() {
echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
$FIO_BIN $BASE_DIR/config.fio --output-format=json "$@"
sleep 1
}
function run_bdevperf(){
function run_bdevperf() {
echo "** Running bdevperf test, this can take a while, depending on the run-time setting."
$BDEVPERF_DIR/bdevperf --json $BASE_DIR/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]"
sleep 1
@ -346,8 +340,8 @@ function run_nvmeperf() {
local disks
# Limit the number of disks to $1 if needed
disks=( $(get_disks nvme) )
disks=( "${disks[@]:0:$1}" )
disks=($(get_disks nvme))
disks=("${disks[@]:0:$1}")
r_opt=$(printf -- ' -r "trtype:PCIe traddr:%s"' "${disks[@]}")
echo "** Running nvme perf test, this can take a while, depending on the run-time setting."
@ -363,7 +357,7 @@ function wait_for_nvme_reload() {
shopt -s extglob
for disk in $nvmes; do
cmd="ls /sys/block/$disk/queue/*@(iostats|rq_affinity|nomerges|io_poll_delay)*"
until $cmd 2>/dev/null; do
until $cmd 2> /dev/null; do
echo "Waiting for full nvme driver reload..."
sleep 0.5
done
@ -374,7 +368,7 @@ function wait_for_nvme_reload() {
function verify_disk_number() {
# Check if we have an appropriate number of disks to carry out the test
if [[ "$PLUGIN" =~ "bdev" ]]; then
cat <<-JSON >"$BASE_DIR/bdev.conf"
cat <<- JSON > "$BASE_DIR/bdev.conf"
{"subsystems":[
$("$ROOT_DIR/scripts/gen_nvme.sh" --json)
]}
@ -390,10 +384,12 @@ function verify_disk_number() {
fi
}
function usage()
{
function usage() {
set +x
[[ -n $2 ]] && ( echo "$2"; echo ""; )
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Run NVMe PMD/BDEV performance test. Change options for easier debug and setup configuration"
echo "Usage: $(basename $1) [options]"
echo "-h, --help Print help and exit"
@ -437,7 +433,10 @@ while getopts 'h-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage $0; exit 0 ;;
help)
usage $0
exit 0
;;
rw=*) RW="${OPTARG#*=}" ;;
rwmixread=*) MIX="${OPTARG#*=}" ;;
iodepth=*) IODEPTH="${OPTARG#*=}" ;;
@ -448,15 +447,27 @@ while getopts 'h-:' optchar; do
repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;;
fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
driver=*) PLUGIN="${OPTARG#*=}" ;;
disk-no=*) DISKNO="${OPTARG#*=}"; ONEWORKLOAD=true ;;
disk-no=*)
DISKNO="${OPTARG#*=}"
ONEWORKLOAD=true
;;
max-disk=*) DISKNO="${OPTARG#*=}" ;;
cpu-allowed=*) CPUS_ALLOWED="${OPTARG#*=}" ;;
no-preconditioning) PRECONDITIONING=false ;;
no-io-scaling) NOIOSCALING=true ;;
*) usage $0 echo "Invalid argument '$OPTARG'"; exit 1 ;;
*)
usage $0 echo "Invalid argument '$OPTARG'"
exit 1
;;
esac
;;
h) usage $0; exit 0 ;;
*) usage $0 "Invalid argument '$optchar'"; exit 1 ;;
h)
usage $0
exit 0
;;
*)
usage $0 "Invalid argument '$optchar'"
exit 1
;;
esac
done
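
The get_results conversions above fold the read/write split into the jq expression and then divide by 100000 (÷100 to undo the percentage weighting, ÷1000 for ns → usec); a quick numeric check with invented fio numbers:
reads_pct=70 writes_pct=$((100 - reads_pct))
read_lat_ns=120000 write_lat_ns=80000        # hypothetical lat_ns.mean values from fio
mean_lat=$((read_lat_ns * reads_pct + write_lat_ns * writes_pct))
echo $((mean_lat / 100000))                  # -> 108, i.e. a 108 usec weighted mean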


@ -96,17 +96,15 @@ echo "run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workloa
printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $NO_CORES $RW $MIX >> $result_file
echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
#Run each workload $REPEAT_NO times
for (( j=0; j < REPEAT_NO; j++ ))
do
for ((j = 0; j < REPEAT_NO; j++)); do
#Start with $DISKNO disks and remove 2 disks for each run to avoid preconditioning before each run.
for (( k=DISKNO; k >= 1; k-=2 ))
do
for ((k = DISKNO; k >= 1; k -= 2)); do
cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio
echo "" >> $BASE_DIR/config.fio
#The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
#Therefore, the per-thread queue depth is set to the desired IODEPTH per device multiplied by the number of devices per thread.
if [[ "$PLUGIN" =~ "spdk-plugin" ]] && [[ "$NOIOSCALING" = false ]]; then
qd=$(( IODEPTH * k ))
qd=$((IODEPTH * k))
else
qd=$IODEPTH
fi
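
Because the SPDK plugin drives several devices from one fio job, the queue depth set above is per thread rather than per device; a trivial numeric sketch:
IODEPTH=128 k=4        # 128 per device, 4 devices on the single job
qd=$((IODEPTH * k))
echo "$qd"             # -> 512 outstanding I/Os for that job
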
@ -177,8 +175,7 @@ do
done
done
#Write results to csv file
for (( k=DISKNO; k >= 1; k-=2 ))
do
for ((k = DISKNO; k >= 1; k -= 2)); do
iops_disks[$k]=$((${iops_disks[$k]} / REPEAT_NO))
if [[ "$PLUGIN" =~ "plugin" ]]; then
@ -206,7 +203,7 @@ do
bw[$k]=$((${bw[$k]} / REPEAT_NO))
printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]}\
printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]} \
${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file
#if tested on only one number of disks


@ -18,20 +18,20 @@ bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1
bdf_sysfs_path=$( readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme" )
bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme")
if [ -z "$bdf_sysfs_path" ]; then
echo "setup.sh failed bind kernel driver to ${bdf}"
return 1
fi
nvme_name=$( basename $bdf_sysfs_path )
nvme_name=$(basename $bdf_sysfs_path)
set +e
ctrlr="/dev/${nvme_name}"
ns="/dev/${nvme_name}n1"
oacs=$( ${NVME_CMD} id-ctrl $ctrlr | grep oacs | cut -d: -f2 )
oacs_firmware=$(( oacs & 0x4 ))
oacs=$(${NVME_CMD} id-ctrl $ctrlr | grep oacs | cut -d: -f2)
oacs_firmware=$((oacs & 0x4))
${NVME_CMD} get-ns-id $ns > ${KERNEL_OUT}.1
${NVME_CMD} id-ns $ns > ${KERNEL_OUT}.2
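
The `oacs & 0x4` test above checks bit 2 of the controller's Optional Admin Command Support field, which indicates Firmware Download/Commit support; a standalone sketch with a made-up register value:
oacs=0x5e                       # hypothetical value parsed from `nvme id-ctrl`
oacs_firmware=$((oacs & 0x4))   # non-zero when firmware commands are supported
echo "$oacs_firmware"           # -> 4 here, so the firmware log test would run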


@ -12,19 +12,19 @@ bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1
bdf_sysfs_path=$( readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme" )
bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme")
if [ -z "$bdf_sysfs_path" ]; then
echo "setup.sh failed bind kernel driver to ${bdf}"
exit 1
fi
nvme_name=$( basename $bdf_sysfs_path )
nvme_name=$(basename $bdf_sysfs_path)
KERNEL_SMART_JSON=$( ${SMARTCTL_CMD} --json=g -a /dev/${nvme_name} | grep -v "/dev/${nvme_name}" | sort || true )
KERNEL_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/${nvme_name} | grep -v "/dev/${nvme_name}" | sort || true)
${SMARTCTL_CMD} -i /dev/${nvme_name}n1
# logs are not provided by json output
KERNEL_SMART_ERRLOG=$( ${SMARTCTL_CMD} -l error /dev/${nvme_name} )
KERNEL_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/${nvme_name})
$rootdir/scripts/setup.sh
@ -43,19 +43,19 @@ if [ ! -c /dev/spdk/nvme0 ]; then
exit 1
fi
CUSE_SMART_JSON=$( ${SMARTCTL_CMD} --json=g -a /dev/spdk/nvme0 | grep -v "/dev/spdk/nvme0" | sort || true )
CUSE_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/spdk/nvme0 | grep -v "/dev/spdk/nvme0" | sort || true)
DIFF_SMART_JSON=$( diff --changed-group-format='%<' --unchanged-group-format='' <(echo "$KERNEL_SMART_JSON") <(echo "$CUSE_SMART_JSON") || true)
DIFF_SMART_JSON=$(diff --changed-group-format='%<' --unchanged-group-format='' <(echo "$KERNEL_SMART_JSON") <(echo "$CUSE_SMART_JSON") || true)
# Mask values can change
ERR_SMART_JSON=$( grep -v "json\.nvme_smart_health_information_log\.\|json\.local_time\.\|json\.temperature\.\|json\.power_on_time\.hours" <<< $DIFF_SMART_JSON || true )
ERR_SMART_JSON=$(grep -v "json\.nvme_smart_health_information_log\.\|json\.local_time\.\|json\.temperature\.\|json\.power_on_time\.hours" <<< $DIFF_SMART_JSON || true)
if [ -n "$ERR_SMART_JSON" ] ; then
if [ -n "$ERR_SMART_JSON" ]; then
echo "Wrong values for: $ERR_SMART_JSON"
exit 1
fi
CUSE_SMART_ERRLOG=$( ${SMARTCTL_CMD} -l error /dev/spdk/nvme0 )
CUSE_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/spdk/nvme0)
if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then
echo "Wrong values in NVMe Error log"
exit 1


@ -5,8 +5,7 @@ NVMF_TCP_IP_ADDRESS="127.0.0.1"
NVMF_TRANSPORT_OPTS=""
NVMF_SERIAL=SPDK00000000000001
function build_nvmf_app_args()
{
function build_nvmf_app_args() {
if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
@ -15,13 +14,13 @@ function build_nvmf_app_args()
fi
}
: ${NVMF_APP_SHM_ID="0"}; export NVMF_APP_SHM_ID
: ${NVMF_APP_SHM_ID="0"}
export NVMF_APP_SHM_ID
build_nvmf_app_args
have_pci_nics=0
function load_ib_rdma_modules()
{
function load_ib_rdma_modules() {
if [ $(uname) != Linux ]; then
return 0
fi
@ -37,9 +36,7 @@ function load_ib_rdma_modules()
modprobe rdma_ucm
}
function detect_soft_roce_nics()
{
function detect_soft_roce_nics() {
if hash rxe_cfg; then
rxe_cfg start
rdma_nics=$(get_rdma_if_list)
@ -54,12 +51,10 @@ function detect_soft_roce_nics()
fi
}
# args 1 and 2 represent the grep filters for finding our NICS.
# subsequent args are all drivers that should be loaded if we find these NICs.
# Those drivers should be supplied in the correct order.
function detect_nics_and_probe_drivers()
{
function detect_nics_and_probe_drivers() {
NIC_VENDOR="$1"
NIC_CLASS="$2"
@ -80,9 +75,7 @@ function detect_nics_and_probe_drivers()
fi
}
function detect_pci_nics()
{
function detect_pci_nics() {
if ! hash lspci; then
return 0
@ -101,38 +94,34 @@ function detect_pci_nics()
sleep 5
}
function detect_rdma_nics()
{
function detect_rdma_nics() {
detect_pci_nics
if [ "$have_pci_nics" -eq "0" ]; then
detect_soft_roce_nics
fi
}
function allocate_nic_ips()
{
(( count=NVMF_IP_LEAST_ADDR ))
function allocate_nic_ips() {
((count = NVMF_IP_LEAST_ADDR))
for nic_name in $(get_rdma_if_list); do
ip="$(get_ip_address $nic_name)"
if [ -z $ip ]; then
ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
ip link set $nic_name up
(( count=count+1 ))
((count = count + 1))
fi
# dump configuration for debug log
ip addr show $nic_name
done
}
function get_available_rdma_ips()
{
function get_available_rdma_ips() {
for nic_name in $(get_rdma_if_list); do
get_ip_address $nic_name
done
}
function get_rdma_if_list()
{
function get_rdma_if_list() {
for nic_type in /sys/class/infiniband/*; do
[[ -e "$nic_type" ]] || break
for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do
@ -142,14 +131,12 @@ function get_rdma_if_list()
done
}
function get_ip_address()
{
function get_ip_address() {
interface=$1
ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
}
function nvmfcleanup()
{
function nvmfcleanup() {
sync
set +e
for i in {1..20}; do
@ -170,8 +157,7 @@ function nvmfcleanup()
modprobe -v -r nvme-fabrics
}
function nvmftestinit()
{
function nvmftestinit() {
if [ -z $TEST_TRANSPORT ]; then
echo "transport not specified - use --transport= to specify"
return 1
@ -205,8 +191,7 @@ function nvmftestinit()
modprobe nvme-$TEST_TRANSPORT || true
}
function nvmfappstart()
{
function nvmfappstart() {
timing_enter start_nvmf_tgt
"${NVMF_APP[@]}" $1 &
nvmfpid=$!
@ -215,8 +200,7 @@ function nvmfappstart()
timing_exit start_nvmf_tgt
}
function nvmftestfini()
{
function nvmftestfini() {
nvmfcleanup || :
if [ -n "$nvmfpid" ]; then
killprocess $nvmfpid
@ -229,15 +213,13 @@ function nvmftestfini()
fi
}
function rdma_device_init()
{
function rdma_device_init() {
load_ib_rdma_modules
detect_rdma_nics
allocate_nic_ips
}
function revert_soft_roce()
{
function revert_soft_roce() {
if hash rxe_cfg; then
interfaces="$(ip -o link | awk '{print $2}' | cut -d":" -f1)"
for interface in $interfaces; do
@ -247,8 +229,7 @@ function revert_soft_roce()
fi
}
function check_ip_is_soft_roce()
{
function check_ip_is_soft_roce() {
IP=$1
if hash rxe_cfg; then
dev=$(ip -4 -o addr show | grep $IP | cut -d" " -f2)
@ -262,8 +243,7 @@ function check_ip_is_soft_roce()
fi
}
function nvme_connect()
{
function nvme_connect() {
local init_count
init_count=$(nvme list | wc -l)
@ -279,8 +259,7 @@ function nvme_connect()
return 1
}
function get_nvme_devs()
{
function get_nvme_devs() {
local dev rest
nvmes=()
@ -292,18 +271,17 @@ function get_nvme_devs()
echo "$dev $rest"
fi
done < <(nvme list)
(( ${#nvmes[@]} )) || return 1
((${#nvmes[@]})) || return 1
echo "${#nvmes[@]}" >&2
}
function gen_nvmf_target_json()
{
function gen_nvmf_target_json() {
local subsystem config=()
for subsystem in "${@:-1}"; do
config+=(
"$(
cat <<-EOF
cat <<- EOF
{
"params": {
"name": "Nvme$subsystem",
@ -319,13 +297,16 @@ function gen_nvmf_target_json()
)"
)
done
jq . <<-JSON
jq . <<- JSON
{
"subsystems": [
{
"subsystem": "bdev",
"config": [
$(IFS=","; printf '%s\n' "${config[*]}")
$(
IFS=","
printf '%s\n' "${config[*]}"
)
]
}
]
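
The `$(IFS=","; printf ...)` expansion above (now split across lines by shfmt) joins the per-subsystem snippets with commas while keeping the IFS change scoped to the subshell; a minimal sketch:
config=('{"a":1}' '{"b":2}')
joined=$(
        IFS=","
        printf '%s\n' "${config[*]}"
)
echo "[$joined]"        # -> [{"a":1},{"b":2}]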


@ -10,8 +10,7 @@ MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py"
function tgt_init()
{
function tgt_init() {
nvmfappstart "-m 0xF"
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
@ -29,7 +28,6 @@ if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP;
exit 0
fi
tgt_init
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1


@ -12,8 +12,7 @@ MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py"
function disconnect_init()
{
function disconnect_init() {
nvmfappstart "-m 0xF0"
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
@ -27,7 +26,7 @@ function disconnect_init()
# Test to make sure we don't segfault or access null pointers when we try to connect to
# a discovery controller that doesn't exist yet.
function nvmf_target_disconnect_tc1 {
function nvmf_target_disconnect_tc1() {
set +e
$rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
-r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
@ -40,7 +39,7 @@ function nvmf_target_disconnect_tc1 {
set -e
}
function nvmf_target_disconnect_tc2 {
function nvmf_target_disconnect_tc2() {
disconnect_init $NVMF_FIRST_TARGET_IP
# If perf doesn't shut down, this test will time out.
@ -58,7 +57,7 @@ function nvmf_target_disconnect_tc2 {
sync
}
function nvmf_target_disconnect_tc3 {
function nvmf_target_disconnect_tc3() {
$rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
-r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT alt_traddr:$NVMF_SECOND_TARGET_IP" &
reconnectpid=$!
@ -86,6 +85,5 @@ else
fi
fi
trap - SIGINT SIGTERM EXIT
nvmftestfini


@ -11,7 +11,7 @@ source $rootdir/test/nvmf/common.sh
trap "exit 1" SIGINT SIGTERM EXIT
TEST_ARGS=( "$@" )
TEST_ARGS=("$@")
run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"


@ -12,7 +12,7 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit
function nvmf_filesystem_create {
function nvmf_filesystem_create() {
fstype=$1
nvme_name=$2
@ -27,7 +27,7 @@ function nvmf_filesystem_create {
i=0
while ! umount /mnt/device; do
[ $i -lt 15 ] || break
i=$((i+1))
i=$((i + 1))
sleep 1
done
@ -41,7 +41,7 @@ function nvmf_filesystem_create {
lsblk -l -o NAME | grep -q -w "${nvme_name}p1"
}
function nvmf_filesystem_part {
function nvmf_filesystem_part() {
incapsule=$1
nvmfappstart "-m 0xF"


@ -9,7 +9,7 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit
"${NVMF_APP[@]}" -m 0xF >$output_dir/nvmf_fuzz_tgt_output.txt 2>&1 &
"${NVMF_APP[@]}" -m 0xF > $output_dir/nvmf_fuzz_tgt_output.txt 2>&1 &
nvmfpid=$!
trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
@ -27,9 +27,9 @@ echo "[Nvme]" > $testdir/nvmf_fuzz.conf
echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2>$output_dir/nvmf_fuzz_logs1.txt
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_fuzz_logs1.txt
# We don't specify a seed for this test. Instead we run a static list of commands from example.json.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2>$output_dir/nvmf_fuzz_logs2.txt
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2> $output_dir/nvmf_fuzz_logs2.txt
rm -f $testdir/nvmf_fuzz.conf
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1


@ -13,7 +13,7 @@ nvmftestinit
timing_enter nvme_identify
bdf=$(get_first_nvme_bdf)
if [ -z "${bdf}" ] ; then
if [ -z "${bdf}" ]; then
echo "No NVMe drive found but test requires it. Failing the test."
exit 1
fi
@ -59,12 +59,12 @@ nvmf_model_number=$($rootdir/examples/nvme/identify/identify -r "\
trsvcid:$NVMF_PORT \
subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}')
if [ ${nvme_serial_number} != ${nvmf_serial_number} ] ; then
if [ ${nvme_serial_number} != ${nvmf_serial_number} ]; then
echo "Serial number doesn't match"
exit 1
fi
if [ ${nvme_model_number} != ${nvmf_model_number} ] ; then
if [ ${nvme_model_number} != ${nvmf_model_number} ]; then
echo "Model number doesn't match"
exit 1
fi


@ -12,14 +12,13 @@ target=foobar
# pre-seed the rng to generate predictive values across different test runs
RANDOM=0
gen_random_s () {
gen_random_s() {
local length=$1 ll
# generate ascii table which nvme supports
local chars=({32..127})
local string
for (( ll = 0; ll < length; ll++ )); do
for ((ll = 0; ll < length; ll++)); do
string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")"
done
# Be nice to rpc.py's arg parser and escape `-` in case it's a first character
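
gen_random_s above builds the string one character at a time by picking a codepoint from 32–127 and rendering it through a `\x` escape; a reproducible one-character sketch (seeded like the test itself):
RANDOM=0
chars=({32..127})
printf -v hex '%x' "${chars[RANDOM % ${#chars[@]}]}"
echo -e "\x$hex"        # one printable ASCII character
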
@ -34,7 +33,6 @@ nvmfappstart "-m 0xF"
trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
# Attempt to create subsystem with non-existing target
out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false
[[ $out == *"Unable to find target"* ]]


@ -25,8 +25,7 @@ fi
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
for i in $(seq 1 $NVMF_SUBSYS)
do
for i in $(seq 1 $NVMF_SUBSYS); do
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i


@ -15,21 +15,21 @@ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM
# Target application should start with a single target.
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
echo "SPDK application did not start with the proper number of targets." && false
echo "SPDK application did not start with the proper number of targets." && false
fi
$rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32
$rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then
echo "nvmf_create_target RPC didn't properly create targets." && false
echo "nvmf_create_target RPC didn't properly create targets." && false
fi
$rpc_py nvmf_delete_target -n nvmf_tgt_1
$rpc_py nvmf_delete_target -n nvmf_tgt_2
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
echo "nvmf_delete_target RPC didn't properly destroy targets." && false
echo "nvmf_delete_target RPC didn't properly destroy targets." && false
fi
trap - SIGINT SIGTERM EXIT
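The reindented lines above sit inside a pattern these tests use repeatedly: pipe an RPC's JSON output through jq 'length' and compare the count in shell. A self-contained sketch with a literal JSON array standing in for the RPC call:

count=$(echo '[{"name":"tgt0"},{"name":"tgt1"}]' | jq 'length')
if [ "$count" != "2" ]; then
	echo "unexpected number of targets: $count" && false
fi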


@ -33,7 +33,7 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR
nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforserial $NVMF_SERIAL 2
if ! get_nvme_devs print 2>/dev/null; then
if ! get_nvme_devs print 2> /dev/null; then
echo "Could not find any nvme devices to work with, aborting the test" >&2
exit 1
fi


@ -10,8 +10,7 @@ rpc_py="$rootdir/scripts/rpc.py"
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
function build_nvmf_example_args()
{
function build_nvmf_example_args() {
if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
echo "sudo -u $(logname) ./examples/nvmf/nvmf/nvmf -i $NVMF_APP_SHM_ID"
else
@ -21,8 +20,7 @@ function build_nvmf_example_args()
NVMF_EXAMPLE="$(build_nvmf_example_args)"
function nvmfexamplestart()
{
function nvmfexamplestart() {
timing_enter start_nvmf_example
$NVMF_EXAMPLE $1 &
nvmfpid=$!
@ -53,7 +51,7 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR
perf="$rootdir/examples/nvme/perf/perf"
$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \
-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
subnqn:nqn.2016-06.io.spdk:cnode1"
trap - SIGINT SIGTERM EXIT


@ -7,14 +7,12 @@ source $rootdir/test/nvmf/common.sh
rpc_py="$rootdir/scripts/rpc.py"
function jcount()
{
function jcount() {
local filter=$1
jq "$filter" | wc -l
}
function jsum()
{
function jsum() {
local filter=$1
jq "$filter" | awk '{s+=$1}END{print s}'
}
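jcount() and jsum() above only gain the one-line brace style, but the jq-plus-awk aggregation they wrap works verbatim outside the helpers too. A standalone sketch with an illustrative payload:

# sums the "bytes" field across all array elements; prints 42
echo '[{"bytes":10},{"bytes":32}]' | jq '.[].bytes' | awk '{s+=$1}END{print s}'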
@ -82,8 +80,7 @@ nvme disconnect -n nqn.2016-06.io.spdk:cnode1
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
# do frequent add delete of namespaces with different nsid.
for i in $(seq 1 $times)
do
for i in $(seq 1 $times); do
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
@ -100,8 +97,7 @@ do
done
# do frequent add delete.
for i in $(seq 1 $times)
do
for i in $(seq 1 $times); do
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1


@ -59,7 +59,7 @@ function waitforio() {
fi
local ret=1
local i
for (( i = 10; i != 0; i-- )); do
for ((i = 10; i != 0; i--)); do
read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
# A few I/O will happen during initial examine. So wait until at least 100 I/O
# have completed to know that bdevperf is really generating the I/O.
@ -73,7 +73,7 @@ function waitforio() {
}
# Test 1: Kill the initiator unexpectedly with no I/O outstanding
function nvmf_shutdown_tc1 {
function nvmf_shutdown_tc1() {
starttarget
# Run bdev_svc, which connects but does not issue I/O
@ -97,7 +97,7 @@ function nvmf_shutdown_tc1 {
}
# Test 2: Kill initiator unexpectedly with I/O outstanding
function nvmf_shutdown_tc2 {
function nvmf_shutdown_tc2() {
starttarget
# Run bdevperf
@ -119,7 +119,7 @@ function nvmf_shutdown_tc2 {
}
# Test 3: Kill the target unexpectedly with I/O outstanding
function nvmf_shutdown_tc3 {
function nvmf_shutdown_tc3() {
starttarget
# Run bdevperf


@ -1,4 +1,3 @@
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
@ -11,8 +10,7 @@ function nvme_cfg() {
echo "$ocf_nvme_cfg"
}
function clear_nvme()
{
function clear_nvme() {
mapfile -t bdf < <(iter_all_pci_class_code 01 08 02)
# Clear metadata on NVMe device


@ -5,11 +5,11 @@ rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
function fio_verify(){
function fio_verify() {
fio_bdev $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev "$@"
}
function cleanup(){
function cleanup() {
rm -f $curdir/modes.conf
}


@ -6,12 +6,11 @@ source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
function bdev_check_claimed()
{
function bdev_check_claimed() {
if [ "$($rpc_py get_bdevs -b "$@" | jq '.[0].claimed')" = "true" ]; then
return 0;
return 0
else
return 1;
return 1
fi
}
@ -34,13 +33,13 @@ $rpc_py bdev_ocf_get_bdevs NonExisting | jq -e \
'.[0] | .name == "PartCache"'
if ! bdev_check_claimed Malloc0; then
>&2 echo "Base device expected to be claimed now"
echo >&2 "Base device expected to be claimed now"
exit 1
fi
$rpc_py bdev_ocf_delete PartCache
if bdev_check_claimed Malloc0; then
>&2 echo "Base device is not expected to be claimed now"
echo >&2 "Base device is not expected to be claimed now"
exit 1
fi
@ -50,34 +49,34 @@ $rpc_py bdev_ocf_get_bdevs FullCache | jq -e \
'.[0] | .started and .cache.attached and .core.attached'
if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
>&2 echo "Base devices expected to be claimed now"
echo >&2 "Base devices expected to be claimed now"
exit 1
fi
$rpc_py bdev_ocf_delete FullCache
if bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1; then
>&2 echo "Base devices are not expected to be claimed now"
echo >&2 "Base devices are not expected to be claimed now"
exit 1
fi
$rpc_py bdev_ocf_create HotCache wt Malloc0 Malloc1
if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
>&2 echo "Base devices expected to be claimed now"
echo >&2 "Base devices expected to be claimed now"
exit 1
fi
$rpc_py bdev_malloc_delete Malloc0
if bdev_check_claimed Malloc1; then
>&2 echo "Base device is not expected to be claimed now"
echo >&2 "Base device is not expected to be claimed now"
exit 1
fi
status=$($rpc_py get_bdevs)
gone=$(echo $status | jq 'map(select(.name == "HotCache")) == []')
if [[ $gone == false ]]; then
>&2 echo "OCF bdev is expected to unregister"
echo >&2 "OCF bdev is expected to unregister"
exit 1
fi


@ -7,15 +7,13 @@ source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
spdk_pid='?'
function start_spdk()
{
function start_spdk() {
$rootdir/app/iscsi_tgt/iscsi_tgt &
spdk_pid=$!
trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT
waitforlisten $spdk_pid
}
function stop_spdk()
{
function stop_spdk() {
killprocess $spdk_pid
trap - SIGINT SIGTERM EXIT
}


@ -3,9 +3,11 @@
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
function usage()
{
[[ -n $2 ]] && ( echo "$2"; echo ""; )
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Devstack installation script"
echo "Usage: $(basename $1) [OPTIONS]"
echo "--branch=BRANCH Define which version of openstack"
@ -15,7 +17,6 @@ function usage()
exit 0
}
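Here the one-line '( echo "$2"; echo ""; )' banner becomes a multi-line subshell. The same conditional-banner idiom, standalone and in the formatted style (the message text is illustrative):

msg="something went wrong"
[[ -n $msg ]] && (
	echo "$msg"
	echo ""
)
echo "Usage: $(basename "$0") [OPTIONS]"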
branch="master"
while getopts 'h-:' optchar; do
case "$optchar" in
@ -26,7 +27,7 @@ while getopts 'h-:' optchar; do
esac
;;
h) usage $0 ;;
*) usage $0 "Invalid argument '$OPTARG'"
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done


@ -9,7 +9,7 @@ TEST_TRANSPORT='rdma'
nvmftestinit
function finish_test {
function finish_test() {
{
"$rpc_py" bdev_lvol_delete_lvstore -l lvs0
kill -9 $rpc_proxy_pid
@ -17,7 +17,7 @@ function finish_test {
} || :
}
cat <<-JSON >"$testdir/conf.json"
cat <<- JSON > "$testdir/conf.json"
{"subsystems":[
$("$rootdir/scripts/gen_nvme.sh" --json)
]}
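Here-documents get the same spacing treatment: a space after "<<-" and after the output redirect. A self-contained sketch of the formatted idiom (the file name is a placeholder):

cat <<- JSON > /tmp/example_conf.json
{"subsystems": []}
JSON
cat /tmp/example_conf.json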


@ -1,8 +1,7 @@
# Prints error message and return error code, closes vhost app and remove
# pmem pool file
# input: error message, error code
function error()
{
function error() {
local error_code=${2:-1}
echo "==========="
echo -e "ERROR: $1"
@ -16,8 +15,7 @@ function error()
# check if there is pool file & remove it
# input: path to pool file
# default: $default_pool_file
function pmem_clean_pool_file()
{
function pmem_clean_pool_file() {
local pool_file=${1:-$default_pool_file}
if [ -f $pool_file ]; then
@ -29,8 +27,7 @@ function pmem_clean_pool_file()
# create new pmem file
# input: path to pool file, size in MB, block_size
# default: $default_pool_file 32 512
function pmem_create_pool_file()
{
function pmem_create_pool_file() {
local pool_file=${1:-$default_pool_file}
local size=${2:-32}
local block_size=${3:-512}
@ -46,8 +43,7 @@ function pmem_create_pool_file()
fi
}
function pmem_unmount_ramspace
{
function pmem_unmount_ramspace() {
if [ -d "$testdir/ramspace" ]; then
if mount | grep -q "$testdir/ramspace"; then
umount $testdir/ramspace
@ -57,16 +53,14 @@ function pmem_unmount_ramspace
fi
}
function pmem_print_tc_name
{
function pmem_print_tc_name() {
echo ""
echo "==============================================================="
echo "Now running: $1"
echo "==============================================================="
}
function vhost_start()
{
function vhost_start() {
local vhost_pid
$rootdir/app/vhost/vhost &
@ -76,8 +70,7 @@ function vhost_start()
waitforlisten $vhost_pid
}
function vhost_kill()
{
function vhost_kill() {
local vhost_pid_file="$testdir/vhost.pid"
local vhost_pid
vhost_pid="$(cat $vhost_pid_file)"


@ -19,9 +19,11 @@ default_pool_file="$testdir/pool_file"
obj_pool_file="$testdir/obj_pool_file"
bdev_name=pmem0
function usage()
{
[[ -n $2 ]] && ( echo "$2"; echo ""; )
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Shortcut script for automated RPC tests for PMEM"
echo
echo "Usage: $(basename $1) [OPTIONS]"
@ -42,18 +44,33 @@ while getopts 'xh-:' optchar; do
-)
case "$OPTARG" in
help) usage $0 ;;
info) test_info=true; test_all=false;;
create) test_create=true; test_all=false;;
delete) test_delete=true; test_all=false;;
construct_bdev) test_construct_bdev=true; test_all=false;;
delete_bdev) test_delete_bdev=true; test_all=false;;
all) test_all_get=true;;
info)
test_info=true
test_all=false
;;
create)
test_create=true
test_all=false
;;
delete)
test_delete=true
test_all=false
;;
construct_bdev)
test_construct_bdev=true
test_all=false
;;
delete_bdev)
test_delete_bdev=true
test_all=false
;;
all) test_all_get=true ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
h) usage $0 ;;
x) enable_script_debug=true ;;
*) usage $0 "Invalid argument '$OPTARG'"
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done
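The getopts rewrite above shows shfmt's case-branch rules: a branch with more than one command is split onto its own lines, and every branch, including the fallback, ends with ";;". A minimal sketch in that style (the options themselves are made up):

while getopts 'vh' opt; do
	case "$opt" in
		v)
			verbose=true
			set -x
			;;
		h) echo "usage: $0 [-v] [-h]" ;;
		*) echo "unknown option" >&2 ;;
	esac
done
echo "verbose=${verbose:-false}"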
@ -69,8 +86,7 @@ fi
#================================================
# bdev_pmem_get_pool_info tests
#================================================
function bdev_pmem_get_pool_info_tc1()
{
function bdev_pmem_get_pool_info_tc1() {
pmem_print_tc_name ${FUNCNAME[0]}
if $rpc_py bdev_pmem_get_pool_info; then
@ -80,8 +96,7 @@ function bdev_pmem_get_pool_info_tc1()
return 0
}
function bdev_pmem_get_pool_info_tc2()
{
function bdev_pmem_get_pool_info_tc2() {
pmem_print_tc_name ${FUNCNAME[0]}
if $rpc_py bdev_pmem_get_pool_info $rootdir/non/existing/path/non_existent_file; then
@ -91,8 +106,7 @@ function bdev_pmem_get_pool_info_tc2()
return 0
}
function bdev_pmem_get_pool_info_tc3()
{
function bdev_pmem_get_pool_info_tc3() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file $obj_pool_file
@ -113,8 +127,7 @@ function bdev_pmem_get_pool_info_tc3()
return 0
}
function bdev_pmem_get_pool_info_tc4()
{
function bdev_pmem_get_pool_info_tc4() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -130,8 +143,7 @@ function bdev_pmem_get_pool_info_tc4()
#================================================
# bdev_pmem_create_pool tests
#================================================
function bdev_pmem_create_pool_tc1()
{
function bdev_pmem_create_pool_tc1() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -159,8 +171,7 @@ function bdev_pmem_create_pool_tc1()
return 0
}
function bdev_pmem_create_pool_tc2()
{
function bdev_pmem_create_pool_tc2() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -176,8 +187,7 @@ function bdev_pmem_create_pool_tc2()
return 0
}
function bdev_pmem_create_pool_tc3()
{
function bdev_pmem_create_pool_tc3() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -205,8 +215,7 @@ function bdev_pmem_create_pool_tc3()
return 0
}
function bdev_pmem_create_pool_tc4()
{
function bdev_pmem_create_pool_tc4() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_unmount_ramspace
@ -236,8 +245,7 @@ function bdev_pmem_create_pool_tc4()
return 0
}
function bdev_pmem_create_pool_tc5()
{
function bdev_pmem_create_pool_tc5() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
local pmem_block_size
@ -278,14 +286,12 @@ function bdev_pmem_create_pool_tc5()
return 0
}
function bdev_pmem_create_pool_tc6()
{
function bdev_pmem_create_pool_tc6() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
local created_pmem_block_size
for i in 511 512 1024 2048 4096 131072 262144
do
for i in 511 512 1024 2048 4096 131072 262144; do
if ! $rpc_py bdev_pmem_create_pool $default_pool_file 256 $i; then
error "Failed to create pmem pool!"
fi
@ -307,8 +313,7 @@ function bdev_pmem_create_pool_tc6()
return 0
}
function bdev_pmem_create_pool_tc7()
{
function bdev_pmem_create_pool_tc7() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -324,8 +329,7 @@ function bdev_pmem_create_pool_tc7()
return 0
}
function bdev_pmem_create_pool_tc8()
{
function bdev_pmem_create_pool_tc8() {
pmem_print_tc_name "bdev_pmem_create_pool_tc8"
pmem_clean_pool_file
@ -341,8 +345,7 @@ function bdev_pmem_create_pool_tc8()
return 0
}
function bdev_pmem_create_pool_tc9()
{
function bdev_pmem_create_pool_tc9() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -369,8 +372,7 @@ function bdev_pmem_create_pool_tc9()
#================================================
# bdev_pmem_delete_pool tests
#================================================
function bdev_pmem_delete_pool_tc1()
{
function bdev_pmem_delete_pool_tc1() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -381,8 +383,7 @@ function bdev_pmem_delete_pool_tc1()
return 0
}
function bdev_pmem_delete_pool_tc2()
{
function bdev_pmem_delete_pool_tc2() {
pmem_print_tc_name "bdev_pmem_delete_pool_tc2"
pmem_clean_pool_file $obj_pool_file
@ -403,8 +404,7 @@ function bdev_pmem_delete_pool_tc2()
return 0
}
function bdev_pmem_delete_pool_tc3()
{
function bdev_pmem_delete_pool_tc3() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -424,8 +424,7 @@ function bdev_pmem_delete_pool_tc3()
return 0
}
function bdev_pmem_delete_pool_tc4()
{
function bdev_pmem_delete_pool_tc4() {
pmem_print_tc_name ${FUNCNAME[0]}
bdev_pmem_delete_pool_tc3
@ -439,8 +438,7 @@ function bdev_pmem_delete_pool_tc4()
#================================================
# bdev_pmem_create tests
#================================================
function bdev_pmem_create_tc1()
{
function bdev_pmem_create_tc1() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -453,8 +451,7 @@ function bdev_pmem_create_tc1()
return 0
}
function bdev_pmem_create_tc2()
{
function bdev_pmem_create_tc2() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
@ -471,8 +468,7 @@ function bdev_pmem_create_tc2()
return 0
}
function bdev_pmem_create_tc3()
{
function bdev_pmem_create_tc3() {
pmem_print_tc_name ${FUNCNAME[0]}
truncate -s 32M $rootdir/test/pmem/random_file
@ -488,8 +484,7 @@ function bdev_pmem_create_tc3()
return 0
}
function bdev_pmem_create_tc4()
{
function bdev_pmem_create_tc4() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file $obj_pool_file
@ -510,8 +505,7 @@ function bdev_pmem_create_tc4()
return 0
}
function bdev_pmem_create_tc5()
{
function bdev_pmem_create_tc5() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
pmem_create_pool_file
@ -541,8 +535,7 @@ function bdev_pmem_create_tc5()
return 0
}
function bdev_pmem_create_tc6()
{
function bdev_pmem_create_tc6() {
pmem_print_tc_name ${FUNCNAME[0]}
local pmem_bdev_name
pmem_clean_pool_file
@ -579,8 +572,7 @@ function bdev_pmem_create_tc6()
#================================================
# bdev_pmem_delete tests
#================================================
function delete_bdev_tc1()
{
function delete_bdev_tc1() {
pmem_print_tc_name ${FUNCNAME[0]}
local pmem_bdev_name
local bdevs_names
@ -612,8 +604,7 @@ function delete_bdev_tc1()
return 0
}
function delete_bdev_tc2()
{
function delete_bdev_tc2() {
pmem_print_tc_name ${FUNCNAME[0]}
pmem_clean_pool_file
pmem_create_pool_file $default_pool_file 256 512


@ -26,7 +26,7 @@ function waitfortcplisten() {
xtrace_disable
local ret=0
local i
for (( i = 40; i != 0; i-- )); do
for ((i = 40; i != 0; i--)); do
# if the process is no longer running, then exit the script
# since it means the application crashed
if ! kill -s 0 $1; then
@ -35,7 +35,7 @@ function waitfortcplisten() {
break
fi
if $rootdir/scripts/rpc.py -t 1 -s "$ipaddr" -p $port rpc_get_methods &>/dev/null; then
if $rootdir/scripts/rpc.py -t 1 -s "$ipaddr" -p $port rpc_get_methods &> /dev/null; then
break
fi
@ -43,7 +43,7 @@ function waitfortcplisten() {
done
xtrace_restore
if (( i == 0 )); then
if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$ipaddr:$port'"
ret=1
fi
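Beyond the loop and redirect spacing, the hunk above is a countdown-style wait loop worth showing in isolation; the polled command below is a stand-in for the rpc.py probe:

for ((i = 40; i != 0; i--)); do
	if curl -s http://127.0.0.1:8080/ &> /dev/null; then
		break
	fi
	sleep 0.5
done
if ((i == 0)); then
	echo "ERROR: timed out waiting for the listener" >&2
fi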


@ -12,7 +12,7 @@ source "$rootdir/test/common/autotest_common.sh"
cd "$rootdir"
function unittest_bdev {
function unittest_bdev() {
$valgrind $testdir/lib/bdev/bdev.c/bdev_ut
$valgrind $testdir/lib/bdev/bdev_ocssd.c/bdev_ocssd_ut
$valgrind $testdir/lib/bdev/raid/bdev_raid.c/bdev_raid_ut
@ -25,7 +25,7 @@ function unittest_bdev {
$valgrind $testdir/lib/bdev/mt/bdev.c/bdev_ut
}
function unittest_blob {
function unittest_blob() {
$valgrind $testdir/lib/blob/blob.c/blob_ut
$valgrind $testdir/lib/blobfs/tree.c/tree_ut
$valgrind $testdir/lib/blobfs/blobfs_async_ut/blobfs_async_ut
@ -34,13 +34,13 @@ function unittest_blob {
$valgrind $testdir/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut
}
function unittest_event {
function unittest_event() {
$valgrind $testdir/lib/event/subsystem.c/subsystem_ut
$valgrind $testdir/lib/event/app.c/app_ut
$valgrind $testdir/lib/event/reactor.c/reactor_ut
}
function unittest_ftl {
function unittest_ftl() {
$valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
$valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
$valgrind $testdir/lib/ftl/ftl_reloc.c/ftl_reloc_ut
@ -49,7 +49,7 @@ function unittest_ftl {
$valgrind $testdir/lib/ftl/ftl_io.c/ftl_io_ut
}
function unittest_iscsi {
function unittest_iscsi() {
$valgrind $testdir/lib/iscsi/conn.c/conn_ut
$valgrind $testdir/lib/iscsi/param.c/param_ut
$valgrind $testdir/lib/iscsi/tgt_node.c/tgt_node_ut $testdir/lib/iscsi/tgt_node.c/tgt_node.conf
@ -58,14 +58,14 @@ function unittest_iscsi {
$valgrind $testdir/lib/iscsi/portal_grp.c/portal_grp_ut $testdir/lib/iscsi/portal_grp.c/portal_grp.conf
}
function unittest_json {
function unittest_json() {
$valgrind $testdir/lib/json/json_parse.c/json_parse_ut
$valgrind $testdir/lib/json/json_util.c/json_util_ut
$valgrind $testdir/lib/json/json_write.c/json_write_ut
$valgrind $testdir/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
}
function unittest_nvme {
function unittest_nvme() {
$valgrind $testdir/lib/nvme/nvme.c/nvme_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut
@ -81,7 +81,7 @@ function unittest_nvme {
$valgrind $testdir/lib/nvme/nvme_uevent.c/nvme_uevent_ut
}
function unittest_nvmf {
function unittest_nvmf() {
$valgrind $testdir/lib/nvmf/ctrlr.c/ctrlr_ut
$valgrind $testdir/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
$valgrind $testdir/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
@ -89,7 +89,7 @@ function unittest_nvmf {
$valgrind $testdir/lib/nvmf/tcp.c/tcp_ut
}
function unittest_scsi {
function unittest_scsi() {
$valgrind $testdir/lib/scsi/dev.c/dev_ut
$valgrind $testdir/lib/scsi/lun.c/lun_ut
$valgrind $testdir/lib/scsi/scsi.c/scsi_ut
@ -97,12 +97,12 @@ function unittest_scsi {
$valgrind $testdir/lib/scsi/scsi_pr.c/scsi_pr_ut
}
function unittest_sock {
function unittest_sock() {
$valgrind $testdir/lib/sock/sock.c/sock_ut
$valgrind $testdir/lib/sock/posix.c/posix_ut
}
function unittest_util {
function unittest_util() {
$valgrind $testdir/lib/util/base64.c/base64_ut
$valgrind $testdir/lib/util/bit_array.c/bit_array_ut
$valgrind $testdir/lib/util/cpuset.c/cpuset_ut


@ -25,8 +25,7 @@ mkdir -p $TARGET_DIR
#
source $rootdir/test/vhost/common/autotest.config
function vhosttestinit()
{
function vhosttestinit() {
if [ "$TEST_MODE" == "iso" ]; then
$rootdir/scripts/setup.sh
@ -48,15 +47,13 @@ function vhosttestinit()
fi
}
function vhosttestfini()
{
function vhosttestfini() {
if [ "$TEST_MODE" == "iso" ]; then
$rootdir/scripts/setup.sh reset
fi
}
function message()
{
function message() {
local verbose_out
if ! $SPDK_VHOST_VERBOSE; then
verbose_out=""
@ -71,16 +68,14 @@ function message()
echo -e "${msg_type}${verbose_out}: $*"
}
function fail()
{
function fail() {
echo "===========" >&2
message "FAIL" "$@" >&2
echo "===========" >&2
exit 1
}
function error()
{
function error() {
echo "===========" >&2
message "ERROR" "$@" >&2
echo "===========" >&2
@ -88,18 +83,15 @@ function error()
false
}
function warning()
{
function warning() {
message "WARN" "$@" >&2
}
function notice()
{
function notice() {
message "INFO" "$@"
}
function get_vhost_dir()
{
function get_vhost_dir() {
local vhost_name="$1"
if [[ -z "$vhost_name" ]]; then
@ -110,8 +102,7 @@ function get_vhost_dir()
echo "$TARGET_DIR/${vhost_name}"
}
function vhost_run()
{
function vhost_run() {
local vhost_name="$1"
local run_gen_nvme=true
@ -150,7 +141,8 @@ function vhost_run()
notice "Command: $cmd"
timing_enter vhost_start
cd $vhost_dir; $cmd &
cd $vhost_dir
$cmd &
vhost_pid=$!
echo $vhost_pid > $vhost_pid_file
@ -158,16 +150,14 @@ function vhost_run()
waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
#do not generate nvmes if pci access is disabled
if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
$rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py\
-s $vhost_dir/rpc.sock load_subsystem_config
$rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
fi
notice "vhost started - pid=$vhost_pid"
timing_exit vhost_start
}
function vhost_kill()
{
function vhost_kill() {
local rc=0
local vhost_name="$1"
@ -192,7 +182,7 @@ function vhost_kill()
if kill -INT $vhost_pid > /dev/null; then
notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
for ((i=0; i<60; i++)); do
for ((i = 0; i < 60; i++)); do
if kill -0 $vhost_pid; then
echo "."
sleep 1
@ -227,8 +217,7 @@ function vhost_kill()
return $rc
}
function vhost_rpc
{
function vhost_rpc() {
local vhost_name="$1"
if [[ -z "$vhost_name" ]]; then
@ -244,20 +233,18 @@ function vhost_rpc
# Mgmt functions
###
function assert_number()
{
function assert_number() {
[[ "$1" =~ [0-9]+ ]] && return 0
error "Invalid or missing paramter: need number but got '$1'"
return 1;
return 1
}
# Run command on vm with given password
# First argument - vm number
# Second argument - ssh password for vm
#
function vm_sshpass()
{
function vm_sshpass() {
vm_num_is_valid $1 || return 1
local ssh_cmd
@ -271,32 +258,27 @@ function vm_sshpass()
$ssh_cmd "$@"
}
# Helper to validate VM number
# param $1 VM number
#
function vm_num_is_valid()
{
function vm_num_is_valid() {
[[ "$1" =~ ^[0-9]+$ ]] && return 0
error "Invalid or missing paramter: vm number '$1'"
return 1;
return 1
}
# Print network socket for given VM number
# param $1 virtual machine number
#
function vm_ssh_socket()
{
function vm_ssh_socket() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
cat $vm_dir/ssh_socket
}
function vm_fio_socket()
{
function vm_fio_socket() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
@ -306,8 +288,7 @@ function vm_fio_socket()
# Execute command on given VM
# param $1 virtual machine number
#
function vm_exec()
{
function vm_exec() {
vm_num_is_valid $1 || return 1
local vm_num="$1"
@ -324,8 +305,7 @@ function vm_exec()
# Execute scp command on given VM
# param $1 virtual machine number
#
function vm_scp()
{
function vm_scp() {
vm_num_is_valid $1 || return 1
local vm_num="$1"
@ -339,11 +319,9 @@ function vm_scp()
"$@"
}
# check if specified VM is running
# param $1 VM num
function vm_is_running()
{
function vm_is_running() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
@ -370,8 +348,7 @@ function vm_is_running()
# check if specified VM is running
# param $1 VM num
function vm_os_booted()
{
function vm_os_booted() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
@ -380,21 +357,19 @@ function vm_os_booted()
return 1
fi
if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2>/dev/null; then
if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
# Shutdown existing master. Ignore errors as it might not exist.
VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2>/dev/null
VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
return 1
fi
return 0
}
# Shutdown given VM
# param $1 virtual machine number
# return non-zero in case of error.
function vm_shutdown()
{
function vm_shutdown() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
if [[ ! -d "$vm_dir" ]]; then
@ -419,8 +394,7 @@ function vm_shutdown()
# Kill given VM
# param $1 virtual machine number
#
function vm_kill()
{
function vm_kill() {
vm_num_is_valid $1 || return 1
local vm_dir="$VM_DIR/$1"
@ -445,10 +419,12 @@ function vm_kill()
# List all VM numbers in VM_DIR
#
function vm_list_all()
{
function vm_list_all() {
local vms
vms="$(shopt -s nullglob; echo $VM_DIR/[0-9]*)"
vms="$(
shopt -s nullglob
echo $VM_DIR/[0-9]*
)"
if [[ -n "$vms" ]]; then
basename --multiple $vms
fi
@ -456,8 +432,7 @@ function vm_list_all()
# Kills all VM in $VM_DIR
#
function vm_kill_all()
{
function vm_kill_all() {
local vm
for vm in $(vm_list_all); do
vm_kill $vm
@ -468,8 +443,7 @@ function vm_kill_all()
# Shutdown all VM in $VM_DIR
#
function vm_shutdown_all()
{
function vm_shutdown_all() {
# XXX: temporarily disable to debug shutdown issue
# xtrace_disable
@ -498,7 +472,7 @@ function vm_shutdown_all()
return 0
fi
((timeo-=1))
((timeo -= 1))
sleep 1
done
@ -507,8 +481,7 @@ function vm_shutdown_all()
xtrace_restore
}
function vm_setup()
{
function vm_setup() {
xtrace_disable
local OPTIND optchar vm_num
@ -536,7 +509,7 @@ function vm_setup()
qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
disk-type=*) disk_type_g="${OPTARG#*=}" ;;
read-only=*) read_only="${OPTARG#*=}" ;;
disks=*) IFS=":" read -ra disks <<<"${OPTARG#*=}" ;;
disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
force=*) force_vm=${OPTARG#*=} ;;
memory=*) guest_memory=${OPTARG#*=} ;;
@ -548,6 +521,7 @@ function vm_setup()
*)
error "unknown argument $OPTARG"
return 1
;;
esac
;;
*)
@ -568,7 +542,7 @@ function vm_setup()
local vm_dir=""
set +x
for (( i=0; i<=256; i++)); do
for ((i = 0; i <= 256; i++)); do
local vm_dir="$VM_DIR/$i"
[[ ! -d $vm_dir ]] && break
done
@ -633,14 +607,14 @@ function vm_setup()
notice "TASK MASK: $task_mask"
local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
local vm_socket_offset=$(( 10000 + 100 * vm_num ))
local vm_socket_offset=$((10000 + 100 * vm_num))
local ssh_socket=$(( vm_socket_offset + 0 ))
local fio_socket=$(( vm_socket_offset + 1 ))
local monitor_port=$(( vm_socket_offset + 2 ))
local migration_port=$(( vm_socket_offset + 3 ))
local gdbserver_socket=$(( vm_socket_offset + 4 ))
local vnc_socket=$(( 100 + vm_num ))
local ssh_socket=$((vm_socket_offset + 0))
local fio_socket=$((vm_socket_offset + 1))
local monitor_port=$((vm_socket_offset + 2))
local migration_port=$((vm_socket_offset + 3))
local gdbserver_socket=$((vm_socket_offset + 4))
local vnc_socket=$((100 + vm_num))
local qemu_pid_file="$vm_dir/qemu.pid"
local cpu_num=0
@ -652,13 +626,13 @@ function vm_setup()
for c in $cpu_list; do
# if range is detected - count how many cpus
if [[ $c =~ [0-9]+-[0-9]+ ]]; then
val=$((c-1))
val=$((c - 1))
val=${val#-}
else
val=1
fi
cpu_num=$((cpu_num+val))
queue_number=$((queue_number+val))
cpu_num=$((cpu_num + val))
queue_number=$((queue_number + val))
done
if [ -z $queue_number ]; then
@ -687,9 +661,9 @@ function vm_setup()
cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
fi
if (( ${#disks[@]} == 0 )) && [[ $disk_type_g == virtio* ]]; then
if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
disks=("default_virtio.img")
elif (( ${#disks[@]} == 0 )); then
elif ((${#disks[@]} == 0)); then
error "No disks defined, aborting"
return 1
fi
@ -697,7 +671,7 @@ function vm_setup()
for disk in "${disks[@]}"; do
# Each disk can define its type in a form of a disk_name,type. The remaining parts
# of the string are dropped.
IFS="," read -r disk disk_type _ <<<"$disk"
IFS="," read -r disk disk_type _ <<< "$disk"
[[ -z $disk_type ]] && disk_type=$disk_type_g
case $disk_type in
@ -712,8 +686,8 @@ function vm_setup()
fi
# Create disk file if it not exist or it is smaller than 1G
if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } || \
[[ ! -e $raw_disk ]]; then
if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } \
|| [[ ! -e $raw_disk ]]; then
if [[ $raw_disk =~ /dev/.* ]]; then
error \
"ERROR: Virtio disk point to missing device ($raw_disk) -\n" \
@ -772,9 +746,9 @@ function vm_setup()
return 1
fi
(( ${#qemu_args[@]} )) && cmd+=("${qemu_args[@]}")
((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
notice "Saving to $vm_dir/run.sh"
cat <<-RUN >"$vm_dir/run.sh"
cat <<- RUN > "$vm_dir/run.sh"
#!/bin/bash
qemu_log () {
echo "=== qemu.log ==="
@ -819,8 +793,7 @@ function vm_setup()
[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
function vm_run()
{
function vm_run() {
local OPTIND optchar vm
local run_all=false
local vms_to_run=""
@ -838,7 +811,7 @@ function vm_run()
if $run_all; then
vms_to_run="$(vm_list_all)"
else
shift $((OPTIND-1))
shift $((OPTIND - 1))
for vm in "$@"; do
vm_num_is_valid $1 || return 1
if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
@ -863,8 +836,7 @@ function vm_run()
done
}
function vm_print_logs()
{
function vm_print_logs() {
vm_num=$1
warning "================"
warning "QEMU LOG:"
@ -892,8 +864,7 @@ function vm_print_logs()
# Wait for all created VMs to boot.
# param $1 max wait time
function vm_wait_for_boot()
{
function vm_wait_for_boot() {
assert_number $1
xtrace_disable
@ -934,7 +905,7 @@ function vm_wait_for_boot()
xtrace_restore
return 1
fi
if (( i > 30 )); then
if ((i > 30)); then
local i=0
echo
fi
@ -957,8 +928,7 @@ function vm_wait_for_boot()
return 0
}
function vm_start_fio_server()
{
function vm_start_fio_server() {
local OPTIND optchar
local readonly=''
while getopts ':-:' optchar; do
@ -967,14 +937,14 @@ function vm_start_fio_server()
case "$OPTARG" in
fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
readonly) local readonly="--readonly" ;;
*) error "Invalid argument '$OPTARG'" && return 1;;
*) error "Invalid argument '$OPTARG'" && return 1 ;;
esac
;;
*) error "Invalid argument '$OPTARG'" && return 1;;
*) error "Invalid argument '$OPTARG'" && return 1 ;;
esac
done
shift $(( OPTIND - 1 ))
shift $((OPTIND - 1))
for vm_num in "$@"; do
notice "Starting fio server on VM$vm_num"
if [[ $fio_bin != "" ]]; then
@ -986,8 +956,7 @@ function vm_start_fio_server()
done
}
function vm_check_scsi_location()
{
function vm_check_scsi_location() {
# Script to find wanted disc
local script='shopt -s nullglob;
for entry in /sys/block/sd*; do
@ -1009,13 +978,12 @@ function vm_check_scsi_location()
# Note: to use this function your VM should be run with
# appropriate memory and with SPDK source already cloned
# and compiled in /root/spdk.
function vm_check_virtio_location()
{
function vm_check_virtio_location() {
vm_exec $1 NRHUGE=512 /root/spdk/scripts/setup.sh
vm_exec $1 "cat > /root/bdev.conf" <<- EOF
[VirtioPci]
Enable Yes
EOF
EOF
vm_exec $1 "cat /root/bdev.conf"
@ -1025,7 +993,7 @@ EOF
source /root/spdk/test/common/autotest_common.sh
discover_bdevs /root/spdk /root/bdev.conf | jq -r '[.[].name] | join(" ")' > /root/fio_bdev_filenames
exit 0
EOF
EOF
SCSI_DISK=$(vm_exec $1 cat /root/fio_bdev_filenames)
if [[ -z "$SCSI_DISK" ]]; then
@ -1037,16 +1005,14 @@ EOF
# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices()
{
function vm_reset_scsi_devices() {
for disk in "${@:2}"; do
notice "VM$1 Performing device reset on disk $disk"
vm_exec $1 sg_reset /dev/$disk -vNd
done
}
function vm_check_blk_location()
{
function vm_check_blk_location() {
local script='shopt -s nullglob; cd /sys/block; echo vd*'
SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
@ -1056,8 +1022,7 @@ function vm_check_blk_location()
fi
}
function run_fio()
{
function run_fio() {
local arg
local job_file=""
local fio_bin=""
@ -1074,7 +1039,7 @@ function run_fio()
case "$arg" in
--job-file=*) local job_file="${arg#*=}" ;;
--fio-bin=*) local fio_bin="${arg#*=}" ;;
--vm=*) vms+=( "${arg#*=}" ) ;;
--vm=*) vms+=("${arg#*=}") ;;
--out=*)
local out="${arg#*=}"
mkdir -p $out
@ -1083,7 +1048,8 @@ function run_fio()
--plugin)
notice "Using plugin mode. Disabling server mode."
run_plugin_mode=true
run_server_mode=false ;;
run_server_mode=false
;;
--json) fio_output_format="json" ;;
--hide-results) hide_results=true ;;
--no-wait-for-fio) wait_for_fio=false ;;
@ -1162,8 +1128,7 @@ function run_fio()
# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit()
{
function at_app_exit() {
local vhost_name
notice "APP EXITING"
@ -1179,8 +1144,7 @@ function at_app_exit()
notice "EXIT DONE"
}
function error_exit()
{
function error_exit() {
trap - ERR
print_backtrace
set +e


@ -17,9 +17,11 @@ used_vms=""
x=""
readonly=""
function usage()
{
[[ -n $2 ]] && ( echo "$2"; echo ""; )
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Shortcut script for doing automated test"
echo "Usage: $(basename $1) [OPTIONS]"
echo
@ -62,12 +64,14 @@ while getopts 'xh-:' optchar; do
esac
;;
h) usage $0 ;;
x) set -x
x="-x" ;;
*) usage $0 "Invalid argument '$OPTARG'"
x)
set -x
x="-x"
;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done
shift $(( OPTIND - 1 ))
shift $((OPTIND - 1))
if [[ ! -r "$fio_job" ]]; then
fail "no fio job file specified"
@ -153,7 +157,7 @@ for vm_conf in "${vms[@]}"; do
fi
done
done <<< "${conf[2]}"
unset IFS;
unset IFS
$rpc_py vhost_get_controllers
fi
@ -188,7 +192,7 @@ if [[ $test_type == "spdk_vhost_scsi" ]]; then
$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
done
done <<< "${conf[2]}"
unset IFS;
unset IFS
done
fi


@ -13,13 +13,13 @@ vhost_rpc_py="$rootdir/scripts/rpc.py"
fuzz_generic_rpc_py="$rootdir/scripts/rpc.py -s $FUZZ_RPC_SOCK"
fuzz_specific_rpc_py="$rootdir/test/app/fuzz/common/fuzz_rpc.py -s $FUZZ_RPC_SOCK"
"${VHOST_APP[@]}" >"$output_dir/vhost_fuzz_tgt_output.txt" 2>&1 &
"${VHOST_APP[@]}" > "$output_dir/vhost_fuzz_tgt_output.txt" 2>&1 &
vhostpid=$!
waitforlisten $vhostpid
trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
"${VHOST_FUZZ_APP[@]}" -t 10 2>"$output_dir/vhost_fuzz_output1.txt" &
"${VHOST_FUZZ_APP[@]}" -t 10 2> "$output_dir/vhost_fuzz_output1.txt" &
fuzzpid=$!
waitforlisten $fuzzpid $FUZZ_RPC_SOCK
@ -47,7 +47,7 @@ $fuzz_generic_rpc_py framework_start_init
wait $fuzzpid
"${VHOST_FUZZ_APP[@]}" -j "$rootdir/test/app/fuzz/vhost_fuzz/example.json" 2>"$output_dir/vhost_fuzz_output2.txt" &
"${VHOST_FUZZ_APP[@]}" -j "$rootdir/test/app/fuzz/vhost_fuzz/example.json" 2> "$output_dir/vhost_fuzz_output2.txt" &
fuzzpid=$!
waitforlisten $fuzzpid $FUZZ_RPC_SOCK


@ -17,9 +17,11 @@ scsi_hot_remove_test=0
blk_hot_remove_test=0
readonly=""
function usage() {
[[ -n $2 ]] && ( echo "$2"; echo ""; )
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Shortcut script for doing automated hotattach/hotdetach test"
echo "Usage: $(basename $1) [OPTIONS]"
echo
@ -57,12 +59,14 @@ while getopts 'xh-:' optchar; do
esac
;;
h) usage $0 ;;
x) set -x
x="-x" ;;
*) usage $0 "Invalid argument '$OPTARG'"
x)
set -x
x="-x"
;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done
shift $(( OPTIND - 1 ))
shift $((OPTIND - 1))
fio_job=$testdir/fio_jobs/default_integrity.job
tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
@ -176,7 +180,6 @@ function wait_for_finish() {
wait $wait_for_pid
}
function reboot_all_and_prepare() {
vms_reboot_all "$1"
vms_prepare "$1"
@ -205,13 +208,13 @@ function check_disks() {
function get_traddr() {
local nvme_name=$1
local nvme
nvme="$( $rootdir/scripts/gen_nvme.sh )"
nvme="$($rootdir/scripts/gen_nvme.sh)"
while read -r line; do
if [[ $line == *"TransportID"* ]] && [[ $line == *$nvme_name* ]]; then
local word_array=($line)
for word in "${word_array[@]}"; do
if [[ $word == *"traddr"* ]]; then
traddr=$( echo $word | sed 's/traddr://' | sed 's/"//' )
traddr=$(echo $word | sed 's/traddr://' | sed 's/"//')
fi
done
fi


@ -8,7 +8,7 @@ source $rootdir/test/vhost/hotplug/common.sh
function get_first_disk() {
vm_check_scsi_location $1
disk_array=( $SCSI_DISK )
disk_array=($SCSI_DISK)
eval "$2=${disk_array[0]}"
}
@ -74,7 +74,6 @@ function prepare_fio_cmd_tc2_iter2() {
done
}
function prepare_fio_cmd_tc3_iter1() {
print_test_fio_header
@ -91,13 +90,13 @@ function prepare_fio_cmd_tc3_iter1() {
for disk in $SCSI_DISK; do
if [ $vm_num == 2 ]; then
if [ $j == 1 ]; then
(( j++ ))
((j++))
continue
fi
fi
echo "[nvme-host$disk]" >> $tmp_detach_job
echo "filename=/dev/$disk" >> $tmp_detach_job
(( j++ ))
((j++))
done
vm_scp "$vm_num" $tmp_detach_job 127.0.0.1:/root/$vm_job_name
run_fio+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$vm_job_name "
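Array assignments and standalone arithmetic lose their padding as well: "disk_array=( $SCSI_DISK )" becomes "disk_array=($SCSI_DISK)" and "(( j++ ))" becomes "((j++))". A standalone sketch of both (the disk list is illustrative):

SCSI_DISK="sda sdb sdc"
disk_array=($SCSI_DISK)
j=1
for d in "${disk_array[@]}"; do
	echo "disk $j: $d"
	((j++))
done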


@ -14,8 +14,7 @@ function run_spdk_fio() {
fio_bdev --ioengine=spdk_bdev "$@" --spdk_mem=1024 --spdk_single_seg=1
}
function create_bdev_config()
{
function create_bdev_config() {
if [ -z "$($RPC_PY bdev_get_bdevs | jq '.[] | select(.name=="Nvme0n1")')" ]; then
error "Nvme0n1 bdev not found!"
fi
@ -43,7 +42,7 @@ function create_bdev_config()
function err_cleanup() {
rm -f $testdir/bdev.json
vhost_kill 0
if [[ -n "$dummy_spdk_pid" ]] && kill -0 $dummy_spdk_pid &>/dev/null; then
if [[ -n "$dummy_spdk_pid" ]] && kill -0 $dummy_spdk_pid &> /dev/null; then
killprocess $dummy_spdk_pid
fi
vhosttestfini
@ -70,7 +69,7 @@ rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr
rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc0.0' -d scsi --vq-count 8 'VirtioScsi1'
rpc_cmd -s /tmp/spdk2.sock bdev_virtio_attach_controller --trtype user --traddr 'naa.Malloc1.0' -d scsi --vq-count 8 'VirtioScsi2'
cat <<-CONF > $testdir/bdev.json
cat <<- CONF > $testdir/bdev.json
{"subsystems":[
$(rpc_cmd -s /tmp/spdk2.sock save_subsystem_config -n bdev)
]}


@ -8,9 +8,11 @@ source $rootdir/test/vhost/common.sh
ctrl_type="spdk_vhost_scsi"
vm_fs="ext4"
function usage()
{
[[ -n $2 ]] && ( echo "$2"; echo ""; )
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Shortcut script for doing automated test"
echo "Usage: $(basename $1) [OPTIONS]"
echo
@ -25,8 +27,7 @@ function usage()
exit 0
}
function clean_lvol_cfg()
{
function clean_lvol_cfg() {
notice "Removing lvol bdev and lvol store"
$rpc_py bdev_lvol_delete lvol_store/lvol_bdev
$rpc_py bdev_lvol_delete_lvstore -l lvol_store
@ -43,9 +44,11 @@ while getopts 'xh-:' optchar; do
esac
;;
h) usage $0 ;;
x) set -x
x="-x" ;;
*) usage $0 "Invalid argument '$OPTARG'"
x)
set -x
x="-x"
;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done


@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -xe
MAKE="make -j$(( $(nproc) * 2 ))"
MAKE="make -j$(($(nproc) * 2))"
if [[ $1 == "spdk_vhost_scsi" ]]; then
devs=""
@ -11,7 +11,10 @@ if [[ $1 == "spdk_vhost_scsi" ]]; then
fi
done
elif [[ $1 == "spdk_vhost_blk" ]]; then
devs=$(cd /sys/block; echo vd*)
devs=$(
cd /sys/block
echo vd*
)
fi
fs=$2
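shfmt expands the "cd /sys/block; echo vd*" one-liner into a multi-line command substitution; either way it runs in a subshell, so the cd does not leak into the caller. A standalone sketch with a placeholder directory:

devs=$(
	cd /tmp
	echo *.log
)
echo "found: $devs"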
@ -27,7 +30,7 @@ for fs in $fs; do
$parted_cmd mklabel gpt
while ! ($parted_cmd print | grep -q gpt); do
[[ $i -lt 100 ]] || break
i=$((i+1))
i=$((i + 1))
sleep 0.1
done
$parted_cmd mkpart primary 2048s 100%
@ -41,7 +44,7 @@ for fs in $fs; do
i=0
until wipefs -a /dev/${dev}1; do
[[ $i -lt 100 ]] || break
i=$((i+1))
i=$((i + 1))
echo "Waiting for /dev/${dev}1"
sleep 0.1
done
@ -64,7 +67,7 @@ for fs in $fs; do
rm -rf /mnt/${dev}dir
parted -s /dev/${dev} rm 1
stats=( $(cat /sys/block/$dev/stat) )
stats=($(cat /sys/block/$dev/stat))
echo ""
echo "$dev stats"
printf "READ IO cnt: % 8u merges: % 8u sectors: % 8u ticks: % 8u\n" \

Some files were not shown because too many files have changed in this diff