From 429258ed26bcda3252b3b220d522927fe091007e Mon Sep 17 00:00:00 2001
From: Karol Latecki
Date: Wed, 17 Jul 2019 15:56:01 +0200
Subject: [PATCH] test/nvme: add kernel io_uring mode for perf scripts

Enable running tests with the io_uring ioengine.

Change-Id: I679343c1774feb3f78f9b6a7ec84735ecfa395a7
Signed-off-by: Karol Latecki
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/462272
Tested-by: SPDK CI Jenkins
Reviewed-by: Jim Harris
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Ben Walker
Reviewed-by: John Kariuki
Reviewed-by: Maciej Wawryk
---
 test/nvme/perf/common.sh   |  5 ++--
 test/nvme/perf/run_perf.sh | 49 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/test/nvme/perf/common.sh b/test/nvme/perf/common.sh
index a85a2dc6c..d832334cc 100755
--- a/test/nvme/perf/common.sh
+++ b/test/nvme/perf/common.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-set -xe
+set -e
 BASE_DIR=$(readlink -f $(dirname $0))
 ROOT_DIR=$(readlink -f $BASE_DIR/../../..)
 PLUGIN_DIR_NVME=$ROOT_DIR/examples/nvme/fio_plugin
@@ -312,7 +312,8 @@ function usage()
 	echo "    --ramp-time=TIME[s]    Fio will run the specified workload for this amount of time before logging any performance numbers. [default=$RAMP_TIME]"
 	echo "    --fio-bin=PATH         Path to fio binary. [default=$FIO_BIN]"
 	echo "    --driver=STR           Use 'bdev' or 'nvme' for spdk driver with fio_plugin,"
-	echo "                           'kernel-libaio', 'kernel-classic-polling' or 'kernel-hybrid-polling' for kernel driver. [default=$PLUGIN]"
+	echo "                           'kernel-libaio', 'kernel-classic-polling', 'kernel-hybrid-polling' or"
+	echo "                           'kernel-io-uring' for kernel driver. [default=$PLUGIN]"
 	echo "    --max-disk=INT,ALL     Number of disks to test on, this will run multiple workloads with an increasing number of disks each run, if =ALL then test on all found disks. [default=$DISKNO]"
 	echo "    --disk-no=INT,ALL      Number of disks to test on, this will run one workload on the selected number of disks, it discards the max-disk setting, if =ALL then test on all found disks"
 	echo "    --rw=STR               Type of I/O pattern. Accepted values are randrw,rw. [default=$RW]"
diff --git a/test/nvme/perf/run_perf.sh b/test/nvme/perf/run_perf.sh
index e97d455e2..8d8e49d88 100755
--- a/test/nvme/perf/run_perf.sh
+++ b/test/nvme/perf/run_perf.sh
@@ -15,9 +15,10 @@
 # 2 devices on numa0 per core, cores 28-29 will be aligned with 2 devices on numa1 per core and cores 30-33 with 1 device on numa1 per core.
 # "--iodepth" - Number of I/Os to keep in flight per device for the SPDK fio_plugin and per job for the kernel driver.
 # "--driver" - This parameter is used to set the ioengine and other fio parameters that determine how fio jobs issue I/O. SPDK supports two modes (nvme and bdev): to use the SPDK BDEV fio plugin set the value to bdev, set the value to nvme to use the SPDK NVMe PMD.
-# There are 3 modes available for the Linux kernel driver: set the value to kernel-libaio to use the Linux asynchronous I/O engine,
+# There are 4 modes available for the Linux kernel driver: set the value to kernel-libaio to use the Linux asynchronous I/O engine,
 # set the value to kernel-classic-polling to use the pvsync2 ioengine in classic polling mode (100% load on the polling CPU core),
-# set the value to kernel-hybrid-polling to use the pvsync2 ioengine in hybrid polling mode where the polling thread sleeps for half the mean device execution time.
+# set the value to kernel-hybrid-polling to use the pvsync2 ioengine in hybrid polling mode where the polling thread sleeps for half the mean device execution time,
+# set the value to kernel-io-uring to use the io_uring engine.
 # "--no-preconditioning" - skip preconditioning - Normally the script will precondition disks to put them in a steady state.
 # However, preconditioning can be skipped, for example when preconditioning has already been done and the workload was 100% reads.
 # "--disk-no" - use the specified number of disks for the test.
@@ -63,6 +64,35 @@ elif [ $PLUGIN = "kernel-hybrid-polling" ]; then
 elif [ $PLUGIN = "kernel-libaio" ]; then
 	$ROOT_DIR/scripts/setup.sh reset
 	fio_ioengine_opt="--ioengine=libaio"
+elif [ $PLUGIN = "kernel-io-uring" ]; then
+	$ROOT_DIR/scripts/setup.sh reset
+	fio_ioengine_opt="--ioengine=io_uring"
+
+	modprobe -rv nvme
+	modprobe nvme poll_queues=8
+
+	backup_dir="/tmp/nvme_param_bak"
+	mkdir -p $backup_dir
+
+	for disk in $disk_names; do
+		echo "INFO: Backing up device parameters for $disk"
+		sysfs=/sys/block/$disk/queue
+		mkdir -p $backup_dir/$disk
+		cat $sysfs/iostats > $backup_dir/$disk/iostats
+		cat $sysfs/rq_affinity > $backup_dir/$disk/rq_affinity
+		cat $sysfs/nomerges > $backup_dir/$disk/nomerges
+		cat $sysfs/io_poll_delay > $backup_dir/$disk/io_poll_delay
+	done
+
+	# Disable I/O stats and merges, zero io_poll_delay for the test run
+	for disk in $disk_names; do
+		echo "INFO: Setting device parameters for $disk"
+		sysfs=/sys/block/$disk/queue
+		echo 0 > $sysfs/iostats
+		echo 0 > $sysfs/rq_affinity
+		echo 2 > $sysfs/nomerges
+		echo 0 > $sysfs/io_poll_delay
+	done
 fi
 
 result_dir=perf_results_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${date}
@@ -161,4 +191,19 @@ do
 		break
 	fi
 done
+
+if [ $PLUGIN = "kernel-io-uring" ]; then
+	# Reload the nvme driver so that other test runs are not affected
+	modprobe -rv nvme
+	modprobe nvme
+
+	for disk in $disk_names; do
+		echo "INFO: Restoring device parameters for $disk"
+		sysfs=/sys/block/$disk/queue
+		cat $backup_dir/$disk/iostats > $sysfs/iostats
+		cat $backup_dir/$disk/rq_affinity > $sysfs/rq_affinity
+		cat $backup_dir/$disk/nomerges > $sysfs/nomerges
+		cat $backup_dir/$disk/io_poll_delay > $sysfs/io_poll_delay
+	done
+fi
 rm -f $BASE_DIR/bdev.conf $BASE_DIR/config.fio
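
Usage note: with the patch applied, the new mode is selected through run_perf.sh's existing --driver option; no other interface changes. Below is a minimal sketch of an invocation. The iodepth, ramp time, disk count and fio path are illustrative assumptions, not values taken from the patch; only --driver=kernel-io-uring is new here.

    # Run the NVMe perf test with the kernel io_uring ioengine.
    # Assumes a kernel with io_uring support (5.1 or newer) and an fio
    # binary built with the io_uring engine (3.13 or newer); the script
    # does not verify either. Root is required, since the script reloads
    # the nvme module and writes to sysfs.
    sudo ./test/nvme/perf/run_perf.sh \
        --driver=kernel-io-uring \
        --rw=randrw \
        --iodepth=32 \
        --ramp-time=10 \
        --disk-no=1 \
        --fio-bin=/usr/local/bin/fio   # illustrative path

During such a run the script reloads the nvme module with poll_queues=8, zeroes iostats, rq_affinity and io_poll_delay and sets nomerges=2 on every tested disk, then restores the values saved under /tmp/nvme_param_bak once fio finishes.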