From 02f1526086ec7e9af2f25be5535266206e975ac3 Mon Sep 17 00:00:00 2001
From: Karol Latecki
Date: Tue, 27 Aug 2019 09:59:24 +0200
Subject: [PATCH] test/nvmf: lower number of io queues in connect_disconnect
 nightly test

By default nvme-cli tries to connect to the subsystem with a number of
IO queues equal to the number of available cores. On more powerful CPUs
with HT enabled this means more time is needed to allocate resources.
Using the "-i" option for nvme connect allows us to control the number
of IO queues and avoid timeout issues when testing.

Fixes issue #927

Change-Id: Ibb9b89b2d8be84fba29360db0f781cc99ae5c5b1
Signed-off-by: Karol Latecki
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466389
Reviewed-by: Tomasz Zawadzki
Reviewed-by: Ben Walker
Reviewed-by: Broadcom SPDK FC-NVMe CI
Tested-by: SPDK CI Jenkins
---
 test/nvmf/target/connect_disconnect.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/nvmf/target/connect_disconnect.sh b/test/nvmf/target/connect_disconnect.sh
index bc3fd9c76..14474e8a3 100755
--- a/test/nvmf/target/connect_disconnect.sh
+++ b/test/nvmf/target/connect_disconnect.sh
@@ -26,13 +26,14 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR
 
 if [ $RUN_NIGHTLY -eq 1 ]; then
 	num_iterations=200
+	IO_QUEUES="-i 8"
 else
 	num_iterations=10
 fi
 
 set +x
 for i in $(seq 1 $num_iterations); do
-	nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+	nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" "$IO_QUEUES"
 	waitforblk "nvme0n1"
 	nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
 	waitforblk_disconnect "nvme0n1"
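
For reference, a minimal sketch (not part of the patch) of a connect/disconnect
cycle with the IO queue count capped via nvme-cli's "-i" option. It assumes
nvme-cli is installed and an NVMe-oF target is already listening; the transport,
address, port, and iteration count below are placeholder values, and the SPDK
test helpers (waitforblk, waitforblk_disconnect) are omitted to keep the sketch
self-contained. $IO_QUEUES is expanded unquoted here so the shell splits it into
the two arguments "-i" and "8".

#!/usr/bin/env bash
# Illustrative only: placeholder transport/address/port, not taken from the patch.
TEST_TRANSPORT=tcp
NVMF_FIRST_TARGET_IP=10.0.0.1
NVMF_PORT=4420
IO_QUEUES="-i 8"   # cap at 8 IO queues instead of one per available core

for i in $(seq 1 3); do
	# Unquoted expansion of $IO_QUEUES lets the shell split it into "-i" and "8"
	nvme connect -t "$TEST_TRANSPORT" -n "nqn.2016-06.io.spdk:cnode1" \
		-a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" $IO_QUEUES
	nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
done

Capping the queue count keeps connect times short regardless of how many cores
the test machine has, which is the timeout the patch is working around.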