nvmf: add automated fio and mkfs tests

Change-Id: I499ae29155b8f053babf18750cecb5c8d210d59c
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Daniel Verkamp 2016-06-07 08:49:44 -07:00
parent 0f912a0eaf
commit 91330eb5c8
6 changed files with 477 additions and 0 deletions

autotest.sh

@@ -54,6 +54,13 @@ time test/lib/log/log.sh
timing_exit lib
timing_enter nvmf
time test/nvmf/fio/fio.sh
time test/nvmf/filesystem/filesystem.sh
timing_exit nvmf
timing_enter cleanup
./scripts/setup.sh reset
./scripts/build_kmod.sh clean
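
For reference, the two new phases can be run standalone the same way autotest.sh invokes them (a sketch, assuming a root shell at the repository root and a Mellanox RDMA-capable NIC):

test/nvmf/fio/fio.sh
test/nvmf/filesystem/filesystem.sh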

scripts/autotest_common.sh

@@ -102,3 +102,13 @@ function process_core() {
return $ret
}
function killprocess() {
    # $1 = process pid
    if [ -z "$1" ]; then
        exit 1
    fi

    echo "killing process with pid $1"
    kill $1
    wait $1 || true
}
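
A minimal usage sketch for killprocess (the hypothetical background job stands in for the nvmf target process started by the test scripts below):

sleep 30 &
pid=$!
killprocess $pid   # prints the PID, sends SIGTERM, and reaps the process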

test/nvmf/filesystem/filesystem.sh Executable file

@@ -0,0 +1,166 @@
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$testdir/../../..
source $rootdir/scripts/autotest_common.sh
NVMF_PORT=7174
NVMF_IP_PREFIX="192.168.100."
NVMF_IP_LEAST_ADDR=8
NVMF_FIRST_TARGET_IP=$NVMF_IP_PREFIX$NVMF_IP_LEAST_ADDR
nvmf_nic_bdfs=""
function load_ib_rdma_modules()
{
    if [ `uname` != Linux ]; then
        exit 0
    fi

    modprobe ib_addr ib_mad ib_sa || true # part of core since 4.7
    modprobe ib_cm
    modprobe ib_core
    modprobe ib_ucm
    modprobe ib_umad
    modprobe ib_uverbs
    modprobe iw_cm
    modprobe rdma_cm
    modprobe rdma_ucm
}
function detect_mellanox_nics()
{
    nvmf_nic_bdfs=`lspci | grep Ethernet | grep Mellanox | awk -F ' ' '{print "0000:"$1}'`
    mlx_core_driver="mlx4_core"
    mlx_ib_driver="mlx4_ib"
    mlx_en_driver="mlx4_en"

    if [ -z "$nvmf_nic_bdfs" ]; then
        exit 0
    fi

    # For the NVMf target loopback test, assume there is only one type of card.
    for nvmf_nic_bdf in $nvmf_nic_bdfs
    do
        result=`find /sys -name $nvmf_nic_bdf | grep driver | awk -F / '{ print $6 }'`
        if [ "$result" == "mlx5_core" ]; then
            mlx_core_driver="mlx5_core"
            mlx_ib_driver="mlx5_ib"
            mlx_en_driver=""
        fi
        break
    done

    # Unload and reload the drivers to get a clean test environment.
    if lsmod | grep -q $mlx_ib_driver; then
        rmmod $mlx_ib_driver
    fi
    if [ -n "$mlx_en_driver" ]; then
        if lsmod | grep -q $mlx_en_driver; then
            rmmod $mlx_en_driver
        fi
    fi
    if lsmod | grep -q $mlx_core_driver; then
        rmmod $mlx_core_driver
    fi

    modprobe $mlx_core_driver
    modprobe $mlx_ib_driver
    if [ -n "$mlx_en_driver" ]; then
        modprobe $mlx_en_driver
    fi

    trap - SIGINT SIGTERM EXIT
}
function detect_rdma_nics()
{
    detect_mellanox_nics
}
function allocate_nic_ips()
{
    LEAST_ADDR=$NVMF_IP_LEAST_ADDR
    for bdf in $1; do
        dir=`find /sys -name $bdf | grep "/sys/devices"`
        if [ -e $dir ]; then
            if [ -e $dir"/net" ]; then
                nic_name=`ls $dir"/net"`
                echo $nic_name
                ifconfig $nic_name $NVMF_IP_PREFIX$LEAST_ADDR netmask 255.255.255.0 up
                LEAST_ADDR=$((LEAST_ADDR + 1))
            fi
        fi
    done

    # check whether the IP is configured
    result=`ifconfig | grep $NVMF_IP_PREFIX`
    if [ -z "$result" ]; then
        echo "no NIC for nvmf test"
        exit 0
    fi
}
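# Example: with the defaults above, the first detected RDMA port is configured
# as 192.168.100.8/24, a second port as 192.168.100.9/24, and so on.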
function nvmfcleanup() {
    rmmod nvme-rdma
}
function filesystem_test()
{
    mkdir -p /mnt/device

    devs=`lsblk -l -o NAME | grep nvme`
    for dev in $devs; do
        parted -s /dev/$dev mklabel msdos
        parted -s /dev/$dev mkpart primary '0%' '100%'
        sleep 1

        for fstype in "ext4" "btrfs" "xfs"; do
            if [ "$fstype" == "ext4" ]; then
                mkfs.${fstype} -F /dev/${dev}
            else
                mkfs.${fstype} -f /dev/${dev}
            fi
            mount /dev/${dev} /mnt/device
            touch /mnt/device/aaa
            rm -rf /mnt/device/aaa
            umount /mnt/device
        done
    done
}
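# Note: the "force" flag is spelled differently per filesystem: mkfs.ext4
# takes -F, while mkfs.btrfs and mkfs.xfs take -f to overwrite any existing
# filesystem signature on the device.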
load_ib_rdma_modules
detect_rdma_nics
allocate_nic_ips $nvmf_nic_bdfs
timing_enter fs_test
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
nvmfpid=$!
trap "process_core; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
sleep 10
modprobe -v nvme-rdma
if [ -e "/dev/nvme-fabrics" ]; then
chmod a+rw /dev/nvme-fabrics
fi
echo 'traddr='$NVMF_FIRST_TARGET_IP',transport=rdma,nr_io_queues=1,trsvcid='$NVMF_PORT',nqn=iqn.2013-06.com.intel.ch.spdk:cnode1' > /dev/nvme-fabrics
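# Connect string fields: traddr is the target IP, trsvcid is the port, and
# nqn is the subsystem name (NodeBase plus SubsystemName from nvmf.conf).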
# file system test
filesystem_test
rm -f ./local-job0-0-verify.state
trap - SIGINT SIGTERM EXIT
nvmfcleanup
killprocess $nvmfpid
timing_exit fs_test

test/nvmf/fio/fio.sh Executable file

@@ -0,0 +1,142 @@
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$testdir/../../..
source $rootdir/scripts/autotest_common.sh
NVMF_PORT=7174
NVMF_IP_PREFIX="192.168.100."
NVMF_IP_LEAST_ADDR=8
NVMF_FIRST_TARGET_IP=$NVMF_IP_PREFIX$NVMF_IP_LEAST_ADDR
nvmf_nic_bdfs=""
function load_ib_rdma_modules()
{
    if [ `uname` != Linux ]; then
        exit 0
    fi

    modprobe ib_addr ib_mad ib_sa || true # part of core since 4.7
    modprobe ib_cm
    modprobe ib_core
    modprobe ib_ucm
    modprobe ib_umad
    modprobe ib_uverbs
    modprobe iw_cm
    modprobe rdma_cm
    modprobe rdma_ucm
}
function detect_mellanox_nics()
{
    nvmf_nic_bdfs=`lspci | grep Ethernet | grep Mellanox | awk -F ' ' '{print "0000:"$1}'`
    mlx_core_driver="mlx4_core"
    mlx_ib_driver="mlx4_ib"
    mlx_en_driver="mlx4_en"

    if [ -z "$nvmf_nic_bdfs" ]; then
        exit 0
    fi

    # For the NVMf target loopback test, assume there is only one type of card.
    for nvmf_nic_bdf in $nvmf_nic_bdfs
    do
        result=`find /sys -name $nvmf_nic_bdf | grep driver | awk -F / '{ print $6 }'`
        if [ "$result" == "mlx5_core" ]; then
            mlx_core_driver="mlx5_core"
            mlx_ib_driver="mlx5_ib"
            mlx_en_driver=""
        fi
        break
    done

    # Unload and reload the drivers to get a clean test environment.
    if lsmod | grep -q $mlx_ib_driver; then
        rmmod $mlx_ib_driver
    fi
    if [ -n "$mlx_en_driver" ]; then
        if lsmod | grep -q $mlx_en_driver; then
            rmmod $mlx_en_driver
        fi
    fi
    if lsmod | grep -q $mlx_core_driver; then
        rmmod $mlx_core_driver
    fi

    modprobe $mlx_core_driver
    modprobe $mlx_ib_driver
    if [ -n "$mlx_en_driver" ]; then
        modprobe $mlx_en_driver
    fi

    trap - SIGINT SIGTERM EXIT
}
function detect_rdma_nics()
{
    detect_mellanox_nics
}
function allocate_nic_ips()
{
    LEAST_ADDR=$NVMF_IP_LEAST_ADDR
    for bdf in $1; do
        dir=`find /sys -name $bdf | grep "/sys/devices"`
        if [ -e $dir ]; then
            if [ -e $dir"/net" ]; then
                nic_name=`ls $dir"/net"`
                echo $nic_name
                ifconfig $nic_name $NVMF_IP_PREFIX$LEAST_ADDR netmask 255.255.255.0 up
                LEAST_ADDR=$((LEAST_ADDR + 1))
            fi
        fi
    done

    # check whether the IP is configured
    result=`ifconfig | grep $NVMF_IP_PREFIX`
    if [ -z "$result" ]; then
        echo "no NIC for nvmf test"
        exit 0
    fi
}
function nvmfcleanup() {
    rmmod nvme-rdma
}
load_ib_rdma_modules
detect_rdma_nics
allocate_nic_ips $nvmf_nic_bdfs
timing_enter fio
# Start up the NVMf target in another process
$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf -t nvmf -t rdma &
nvmfpid=$!
trap "process_core; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
sleep 10
modprobe -v nvme-rdma
if [ -e "/dev/nvme-fabrics" ]; then
chmod a+rw /dev/nvme-fabrics
fi
echo 'traddr='$NVMF_FIRST_TARGET_IP',transport=rdma,nr_io_queues=1,trsvcid='$NVMF_PORT',nqn=iqn.2013-06.com.intel.ch.spdk:cnode1' > /dev/nvme-fabrics
$testdir/nvmf_fio.py 4096 1 rw 1 verify
$testdir/nvmf_fio.py 4096 1 randrw 1 verify
$testdir/nvmf_fio.py 4096 128 rw 1 verify
$testdir/nvmf_fio.py 4096 128 randrw 1 verify
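# nvmf_fio.py arguments: <io_size> <queue_depth> <test_type> <runtime>, plus
# an optional trailing "verify" to enable fio data verification.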
rm -f ./local-job0-0-verify.state
trap - SIGINT SIGTERM EXIT
nvmfcleanup
killprocess $nvmfpid
timing_exit fio

test/nvmf/fio/nvmf_fio.py Executable file

@@ -0,0 +1,122 @@
#!/usr/bin/env python
from subprocess import check_call, call, check_output, Popen, PIPE
import re
import sys
import signal
fio_template = """
[global]
thread=1
invalidate=1
rw=%(testtype)s
time_based=1
runtime=%(runtime)s
ioengine=libaio
direct=1
bs=%(blocksize)d
iodepth=%(iodepth)d
%(verify)s
verify_dump=1
verify_async=10
"""
verify_template = """
do_verify=1
verify=meta
verify_pattern="meta"
"""
fio_job_template = """
[job%(jobnumber)d]
filename=%(device)s
"""
def interrupt_handler(signum, frame):
    fio.terminate()
    print "FIO terminated"
    sys.exit(0)
def main():
    global fio

    if len(sys.argv) < 5:
        print "usage:"
        print "  " + sys.argv[0] + " <io_size> <queue_depth> <test_type> <runtime>"
        print "advanced usage:"
        print "If you want to run fio with verify, add a verify string after runtime."
        print "Currently nvmf_fio.py only supports write, rw, randwrite, and randrw with verify enabled."
        sys.exit(1)

    io_size = int(sys.argv[1])
    queue_depth = int(sys.argv[2])
    test_type = sys.argv[3]
    runtime = sys.argv[4]
    if len(sys.argv) > 5:
        verify = True
    else:
        verify = False

    devices = get_target_devices()
    print "Found devices: ", devices

    # configure_devices(devices)
    fio_executable = '/usr/bin/fio'

    device_paths = ['/dev/' + dev for dev in devices]
    print device_paths
    sys.stdout.flush()
    signal.signal(signal.SIGTERM, interrupt_handler)
    signal.signal(signal.SIGINT, interrupt_handler)
    fio = Popen([fio_executable, '-'], stdin=PIPE)
    fio.communicate(create_fio_config(io_size, queue_depth, device_paths, test_type, runtime, verify))
    fio.stdin.close()
    rc = fio.wait()
    print "FIO completed with code %d\n" % rc
    sys.stdout.flush()
    sys.exit(rc)
def get_target_devices():
    output = check_output('lsblk -l -o NAME', shell=True)
    return re.findall("(nvme[0-9]+n[0-9]+)\n", output)
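# The trailing "\n" in the pattern matches whole namespaces such as nvme0n1
# while excluding partition names like nvme0n1p1.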
def create_fio_config(size, q_depth, devices, test, run_time, verify):
    if not verify:
        verifyfio = ""
    else:
        verifyfio = verify_template
    fiofile = fio_template % {"blocksize": size, "iodepth": q_depth,
                              "testtype": test, "runtime": run_time, "verify": verifyfio}
    for (i, dev) in enumerate(devices):
        fiofile += fio_job_template % {"jobnumber": i, "device": dev}
    return fiofile
def set_device_parameter(devices, filename_template, value):
    for dev in devices:
        filename = filename_template % dev
        f = open(filename, 'r+b')
        f.write(value)
        f.close()
def configure_devices(devices):
    set_device_parameter(devices, "/sys/block/%s/queue/nomerges", "2")
    set_device_parameter(devices, "/sys/block/%s/queue/nr_requests", "128")
    requested_qd = 128
    qd = requested_qd
    while qd > 0:
        try:
            set_device_parameter(devices, "/sys/block/%s/device/queue_depth", str(qd))
            break
        except IOError:
            qd = qd - 1
    if qd == 0:
        print "Could not set block device queue depths."
    elif qd < requested_qd:
        print "Requested queue_depth {} but only {} is supported.".format(str(requested_qd), str(qd))
    set_device_parameter(devices, "/sys/block/%s/queue/scheduler", "noop")
if __name__ == "__main__":
    main()

test/nvmf/nvmf.conf Normal file

@@ -0,0 +1,30 @@
[Global]
  Comment "Global section"
  LogFacility "local7"

[Rpc]
  #RpcConfiguration Yes

[Nvmf]
  NodeBase "iqn.2013-06.com.intel.ch.spdk"
  AuthFile /usr/local/etc/nvmf/auth.conf
  MaxConnectionsPerSession 4

[Port1]
  Comment "Test1"
  FabricIntf DA1 192.168.100.8:7174

[InitiatorGroup1]
  Comment "Initiator Group1"
  InitiatorName ALL
  Netmask 192.168.100.0/24

[Nvme]
  ClaimAllDevices Yes
  UnbindFromKernel Yes

[SubsystemGroup1]
  SubsystemName cnode1
  Mapping Port1 InitiatorGroup1
  QueueDepth 128
  Controller0 Nvme0
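
# Note: Port1's address (192.168.100.8:7174) matches NVMF_FIRST_TARGET_IP and
# NVMF_PORT in the test scripts, and NodeBase + SubsystemName form the NQN
# "iqn.2013-06.com.intel.ch.spdk:cnode1" used in the connect string.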