test/adq: a functional perf test for tcp with ADQ enabled

We apply ADQ on the target and then run perf, so that we can verify
that the SPDK implementation interacts correctly with ADQ.
The functional test cases here are relatively simplified, and in CI
we have to build with the -debug compilation option, so perf
performance with ADQ may not be ideal. We will add separate
performance reference test cases for ADQ later.
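
For reference, a hedged sketch of how this test might be invoked by
hand (building with --enable-debug matches the CI note above; running
the script directly with TEST_TRANSPORT set, instead of through
run_test, is an assumption):

    ./configure --enable-debug && make
    sudo TEST_TRANSPORT=tcp ./test/nvmf/target/perf_adq.sh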

Change-Id: I5341a7fcd61334ef78084302a4ae70f8ec9b9e46
Signed-off-by: wanghailiangx <hailiangx.e.wang@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7476
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Kamil Godzwon <kamilx.godzwon@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Xiaodong Liu <xiaodong.liu@intel.com>
commit c930efda28 (parent 31513614a7)
wanghailiangx, 2021-04-19 13:27:20 -04:00; committed by Tomasz Zawadzki
2 changed files with 181 additions and 0 deletions


@@ -58,6 +58,13 @@ run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
run_test "nvmf_fio_target" test/nvmf/target/fio.sh "${TEST_ARGS[@]}" run_test "nvmf_fio_target" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}" run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"
if [[ $NET_TYPE == phy ]]; then if [[ $NET_TYPE == phy ]]; then
if [ "$SPDK_TEST_NVMF_TRANSPORT" = "tcp" ]; then
gather_supported_nvmf_pci_devs
TCP_INTERFACE_LIST=("${net_devs[@]}")
if ((${#TCP_INTERFACE_LIST[@]} > 0)); then
run_test "nvmf_perf_adq" test/nvmf/target/perf_adq.sh "${TEST_ARGS[@]}"
fi
fi
run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}" run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
#TODO: disabled due to intermittent failures. Need to triage. #TODO: disabled due to intermittent failures. Need to triage.
# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS # run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
@@ -71,6 +78,7 @@ timing_enter host
run_test "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}" run_test "nvmf_identify" test/nvmf/host/identify.sh "${TEST_ARGS[@]}"
run_test "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}" run_test "nvmf_perf" test/nvmf/host/perf.sh "${TEST_ARGS[@]}"
run_test "nvmf_failover" test/nvmf/host/failover.sh "${TEST_ARGS[@]}" run_test "nvmf_failover" test/nvmf/host/failover.sh "${TEST_ARGS[@]}"
if [[ $SPDK_TEST_USDT -eq 1 ]]; then if [[ $SPDK_TEST_USDT -eq 1 ]]; then
run_test "nvmf_multipath" test/nvmf/host/multipath.sh "${TEST_ARGS[@]}" run_test "nvmf_multipath" test/nvmf/host/multipath.sh "${TEST_ARGS[@]}"

test/nvmf/target/perf_adq.sh (new executable file, 173 lines)

@@ -0,0 +1,173 @@
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh
gather_supported_nvmf_pci_devs
TCP_INTERFACE_LIST=("${net_devs[@]}")
if ((${#TCP_INTERFACE_LIST[@]} == 0)); then
echo "ERROR: Physical TCP interfaces are not ready"
exit 1
fi
rpc_py="$rootdir/scripts/rpc.py"
perf="$SPDK_EXAMPLE_DIR/perf"
enable_adq=0
function pre_conf_for_adq() {
# Enable adding flows to hardware
"${NVMF_TARGET_NS_CMD[@]}" ethtool --offload $NVMF_TARGET_INTERFACE hw-tc-offload on
# The ADQ driver turns this switch on by default; we need to turn it off for SPDK testing
"${NVMF_TARGET_NS_CMD[@]}" ethtool --set-priv-flags $NVMF_TARGET_INTERFACE channel-pkt-inspect-optimize off
# Since sockets are non-blocking, a non-zero value of net.core.busy_read is sufficient
sysctl -w net.core.busy_poll=1
sysctl -w net.core.busy_read=1
}
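# Illustrative, commented-out sanity checks (hedged; not part of the original
# flow): the same ethtool/sysctl interfaces can read back what
# pre_conf_for_adq just set.
# "${NVMF_TARGET_NS_CMD[@]}" ethtool --show-offload $NVMF_TARGET_INTERFACE | grep hw-tc-offload
# "${NVMF_TARGET_NS_CMD[@]}" ethtool --show-priv-flags $NVMF_TARGET_INTERFACE | grep channel-pkt-inspect-optimize
# sysctl net.core.busy_poll net.core.busy_read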
# Configuring traffic classes
function traffic_classes() {
tc=/usr/sbin/tc
# Create 2 traffic classes and 2 tc1 queues
"${NVMF_TARGET_NS_CMD[@]}" $tc qdisc add dev $NVMF_TARGET_INTERFACE root \
mqprio num_tc 2 map 0 1 queues 2@0 2@2 hw 1 mode channel
"${NVMF_TARGET_NS_CMD[@]}" $tc qdisc add dev $NVMF_TARGET_INTERFACE ingress
# TC filter is configured using target address (traddr) and port number (trsvcid) to steer packets
"${NVMF_TARGET_NS_CMD[@]}" $tc filter add dev $NVMF_TARGET_INTERFACE protocol \
ip parent ffff: prio 1 flower dst_ip $NVMF_FIRST_TARGET_IP/32 ip_proto tcp dst_port $NVMF_PORT skip_sw hw_tc 1
# Set up the mechanism for Tx queue selection based on the Rx queue(s) map
"${NVMF_TARGET_NS_CMD[@]}" $rootdir/scripts/perf/nvmf/set_xps_rxqs $NVMF_TARGET_INTERFACE
}
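# To inspect the resulting qdisc and filter setup, the standard tc show
# subcommands can be used (illustrative, commented out):
# "${NVMF_TARGET_NS_CMD[@]}" $tc qdisc show dev $NVMF_TARGET_INTERFACE
# "${NVMF_TARGET_NS_CMD[@]}" $tc filter show dev $NVMF_TARGET_INTERFACE ingress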
function start_nvmf_target() {
nvmfappstart -m $1 --wait-for-rpc
trap 'process_shm --id $NVMF_APP_SHM_ID; clean_ints_files; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
$rpc_py sock_impl_set_options --enable-placement-id $enable_adq --enable-zerocopy-send-server -i posix
$rpc_py framework_start_init
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS --io-unit-size 8192 --sock-priority $enable_adq
$rpc_py bdev_malloc_create 64 512 -b Malloc1
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
}
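# Hedged note on the wiring above: with enable_adq=1 the posix sock layer
# reports a placement ID per connection, so qpairs sharing a hardware queue can
# be grouped onto the same poll group, and the non-zero --sock-priority steers
# new target sockets into the ADQ traffic class; with enable_adq=0 both
# options are effectively disabled.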
function reload_driver() {
rmmod ice
modprobe ice
sleep 5
}
function clean_ints_files() {
rm -f temp_ints1.log
rm -f temp_ints2.log
}
function get_nvmf_poll_groups() {
"$rpc_py" thread_get_pollers | jq -r '.threads[] | .active_pollers[] |
select(.name == "nvmf_poll_group_poll").busy_count'
}
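# Assumed shape of the thread_get_pollers output consumed by the jq filter
# above (only the fields used here are shown; values illustrative):
# {"threads": [{"name": "nvmf_tgt_poll_group_0", "active_pollers":
#   [{"name": "nvmf_poll_group_poll", "busy_count": 12345}]}]}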
function num_busy_count() {
get_pollers_busy_count0=($(get_nvmf_poll_groups))
sleep 2
get_pollers_busy_count1=($(get_nvmf_poll_groups))
local num=0
for i in "${!get_pollers_busy_count0[@]}"; do
increment=$((get_pollers_busy_count1[i] - get_pollers_busy_count0[i]))
if ((increment > 0)); then
((++num))
fi
done
echo $num
}
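# num_busy_count samples each nvmf_poll_group_poll poller's busy_count twice,
# 2 seconds apart, and reports how many pollers made progress in between,
# e.g.: (( $(num_busy_count) == 2 )) && echo "2 poll groups are busy"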
function compare_ints() {
grep $2 < /proc/interrupts | awk '{
for (i = 2; i <= NF; i++)
printf "%d\n", $i;
}' > temp_ints1.log
sleep $1
grep $2 < /proc/interrupts | awk '{
for (i = 2; i <= NF; i++)
printf "%d\n", $i;
}' > temp_ints2.log
if diff temp_ints1.log temp_ints2.log > /dev/null; then
return 0
fi
return 1
}
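# For reference, a typical /proc/interrupts row matched above looks like
# (values illustrative): "98: 1102 0 55 0 IR-PCI-MSI ice-eth0-TxRx-0".
# Fields 2..NF are printed with "%d", so trailing non-numeric fields become 0
# in both snapshots and never affect the diff.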
function check_ints_result() {
# We run compare_ints up to 3 times; if even one run sees no new interrupts,
# we consider the test passed. Ideally, a single run would be enough.
for ((i = 1; i <= 3; i++)); do
if compare_ints 2 $NVMF_TARGET_INTERFACE; then
return 0
fi
done
return 1
}
# Clear any previous configuration that might affect the tests.
# At present, ADQ configuration is only applicable to the ice driver.
reload_driver
# Testcase 1 and Testcase 2 show SPDK interacting with ADQ.
# We define "num_busy_count" as the number of nvmf_poll_group_poll pollers whose busy_count keeps increasing.
# When ADQ is enabled, num_busy_count equals the number of tc1 queues of the traffic classes.
# When ADQ is disabled, num_busy_count equals the smaller of the initiator connection count and the target core count.
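# Worked example with the numbers used below: the target runs on 4 cores (0xF)
# and perf runs on 3 cores (0x70). With ADQ disabled we expect
# num_busy_count == min(3 connections, 4 cores) == 3; with ADQ enabled we
# expect num_busy_count == 2, the number of tc1 queues (2@2).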
# Testcase 1: Testing 2 traffic classes and 2 tc1 queues without ADQ
nvmftestinit
start_nvmf_target 0xF
sleep 2
$perf -q 64 -o 4096 -w randread -t 10 -c 0x70 \
-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
subnqn:nqn.2016-06.io.spdk:cnode1" &
perfpid=$!
sleep 2
if [[ $(num_busy_count) -ne 3 ]]; then
echo "ERROR: num_busy_count != cores of initiators! Testcase 1 failed."
exit 1
fi
wait $perfpid
clean_ints_files
nvmftestfini
reload_driver
# Testcase 2: Testing 2 traffic classes and 2 tc1 queues with ADQ
enable_adq=1
nvmftestinit
sleep 2
pre_conf_for_adq
traffic_classes
start_nvmf_target 0xF
sleep 2
# The number of I/O connections from the initiator is core count * qpairs per ns: 3 cores (-c 0x70) * 2 (-P 2) = 6.
# ADQ on the target side works if those 6 connections are matched to two of the four target cores.
$perf -q 64 -o 4096 -w randread -t 15 -P 2 -c 0x70 \
-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
subnqn:nqn.2016-06.io.spdk:cnode1" &
perfpid=$!
sleep 3
if ! check_ints_result; then
echo "ERROR: check_ints failed! There is interruption in perf, this is not what we expected."
exit 1
fi
if [[ $(num_busy_count) -ne 2 ]]; then
echo "ERROR: num_busy_count != tc1 queues of traffic classes! Testcase 2 failed."
exit 1
fi
wait $perfpid
clean_ints_files
nvmftestfini
trap - SIGINT SIGTERM EXIT