diff --git a/test/nvmf/target/perf_adq.sh b/test/nvmf/target/perf_adq.sh
index 580830f2c..635383738 100755
--- a/test/nvmf/target/perf_adq.sh
+++ b/test/nvmf/target/perf_adq.sh
@@ -53,44 +53,28 @@ function adq_reload_driver() {
 	sleep 5
 }
 
-function get_nvmf_poll_groups() {
-	"$rpc_py" thread_get_pollers | jq -r '.threads[] | .active_pollers[] |
-	select(.name == "nvmf_poll_group_poll").busy_count'
-}
-
-function num_busy_count() {
-	get_pollers_busy_count0=($(get_nvmf_poll_groups))
-	sleep 2
-	get_pollers_busy_count1=($(get_nvmf_poll_groups))
-	local num=0
-	for i in "${!get_pollers_busy_count0[@]}"; do
-		increment=$((get_pollers_busy_count1[i] - get_pollers_busy_count0[i]))
-		if ((increment > 0)); then
-			((++num))
-		fi
-	done
-	echo $num
-}
-
 # Clear the previous configuration that may have an impact.
 # At present, ADQ configuration is only applicable to the ice driver.
 adq_reload_driver
 
-# Testcase 1 and Testcase 2 show the SPDK interacting with ADQ.
-# The number of continuously increasing nvmf_poll_group_poll's busy_count, we define it as "num_busy_count".
-# When ADQ enabled, num_busy_count will be equal to the number of tc1 queues of traffic classes.
-# When ADQ disabled, num_busy_count will be equal to the smaller value of initiator connections and target cores.
-# Testcase 1: Testing 2 traffic classes and 2 tc1 queues without ADQ
+# We are going to run the test twice, once with ADQ enabled and once with it disabled.
+# The nvmf target is given 4 cores and ADQ creates queues in one traffic class. We then run
+# perf with 4 cores (i.e. 4 connections) and examine how the connections are allocated to the nvmf target's
+# poll groups.
+
+# When ADQ is disabled, we expect 1 connection on each of the 4 poll groups.
 nvmftestinit
 adq_start_nvmf_target 0 0xF
 sleep 2
-$perf -q 64 -o 4096 -w randread -t 10 -c 0x70 \
+$perf -q 64 -o 4096 -w randread -t 10 -c 0xF0 \
 	-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
-subnqn:nqn.2016-06.io.spdk:cnode1" &
+	subnqn:nqn.2016-06.io.spdk:cnode1" &
 perfpid=$!
 sleep 2
-if [[ $(num_busy_count) -ne 3 ]]; then
-	echo "ERROR: num_busy_count != cores of initiators! Testcase 1 failed."
+
+count=$("$rpc_py" nvmf_get_stats | jq -r '.poll_groups[] | select(.current_io_qpairs == 1) | length' | wc -l)
+if [[ "$count" -ne 4 ]]; then
+	echo "ERROR: With ADQ disabled, connections were not evenly distributed amongst poll groups!"
 	exit 1
 fi
 wait $perfpid
@@ -98,24 +82,24 @@ nvmftestfini
 
 adq_reload_driver
 
-# Testcase 2: Testing 2 traffic classes and 2 tc1 queues with ADQ
+# When ADQ is enabled, we expect the connections to reside on AT MOST two poll groups.
 nvmftestinit
 sleep 2
 adq_configure_driver
 adq_start_nvmf_target 1 0xF
 sleep 2
 
-# The number of I/O connections from initiator is the core count * qpairs per ns, so here its 12.
-# ADQ on target side will work if 12 connections are matched to two out of four cores on the target.
-$perf -q 64 -o 4096 -w randread -t 15 -P 4 -c 0x70 \
+$perf -q 64 -o 4096 -w randread -t 10 -c 0xF0 \
 	-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
-subnqn:nqn.2016-06.io.spdk:cnode1" &
+	subnqn:nqn.2016-06.io.spdk:cnode1" &
 perfpid=$!
-sleep 3
+sleep 2
 
-if [[ $(num_busy_count) -ne 2 ]]; then
-	echo "ERROR: num_busy_count != tc1 queues of traffic classes! Testcase 2 failed."
+count=$("$rpc_py" nvmf_get_stats | jq -r '.poll_groups[] | select(.current_io_qpairs == 0) | length' | wc -l)
+if [[ "$count" -lt 2 ]]; then
+	echo "ERROR: With ADQ enabled, did not find 0 connections on 2 of the poll groups!"
 	exit 1
 fi
+
 wait $perfpid
 nvmftestfini