test/sma: limit the number of created devices in vhost_blk

We don't need to create enough devices to fully populate two buses: 32
devices on the first bus and a single device on the second one test the
same scenario while allocating fewer resources.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Ibaca094f71f3702f8d58f5feb54b676df749ff49
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15645
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Author:    Konrad Sztyber, 2022-11-24 14:23:59 +01:00
Committer: Tomasz Zawadzki
Parent:    55b047a776
Commit:    8b0bb7f08e

@@ -138,23 +138,23 @@ delete_device "$devid1"
 # At the end check if vhost devices are gone
 [[ $(vm_exec $vm_no "lsblk | grep -E \"^vd.\" | wc -l") -eq 0 ]]
-# Create 62 bdevs, two already exist
-for ((i = 2; i < 64; i++)); do
+# Create 31 bdevs, two already exist
+for ((i = 2; i < 33; i++)); do
 	rpc_cmd bdev_null_create null$i 100 4096
 done
 devids=()
-# Not try to add 64 devices, max for two buses
-for ((i = 0; i < 64; i++)); do
+# Now try to add 33 devices, max for one bus + one device on the next bus
+for ((i = 0; i < 33; i++)); do
 	uuid=$(rpc_cmd bdev_get_bdevs -b null$i | jq -r '.[].uuid')
 	devids[$i]=$(create_device $i $uuid | jq -r '.handle')
 done
-[[ $(vm_exec $vm_no "lsblk | grep -E \"^vd.\" | wc -l") -eq 64 ]]
+[[ $(vm_exec $vm_no "lsblk | grep -E \"^vd.\" | wc -l") -eq 33 ]]
 # Cleanup at the end
-for ((i = 0; i < 64; i++)); do
+for ((i = 0; i < 33; i++)); do
 	delete_device ${devids[$i]}
 done
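
An aside on the arithmetic behind the new bounds (an illustrative sketch, not part of the commit): the old comment's "64 devices, max for two buses" implies 32 slots per bus, so indexes 0 through 31 fill the first bus and index 32 becomes the lone device on the second one. The loop below only prints that mapping; the per-bus size is an assumption taken from that comment.

# Illustrative only: print which bus each device index would land on,
# assuming 32 devices per bus as implied by the original comment.
devices_per_bus=32
for ((i = 0; i < 33; i++)); do
	echo "device $i -> bus $((i / devices_per_bus))"
done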
@@ -241,7 +241,7 @@ diff <(get_qos_caps $device_vhost | jq --sort-keys) <(
 EOF
 # Make sure that limits were changed
-diff <(rpc_cmd bdev_get_bdevs -b null63 | jq --sort-keys '.[].assigned_rate_limits') <(
+diff <(rpc_cmd bdev_get_bdevs -b null32 | jq --sort-keys '.[].assigned_rate_limits') <(
 	jq --sort-keys <<- EOF
 		{
 			"rw_ios_per_sec": 3000,
@@ -272,7 +272,7 @@ diff <(rpc_cmd bdev_get_bdevs -b null63 | jq --sort-keys '.[].assigned_rate_limi
 EOF
 # Make sure that limits were changed even if volume id is not set
-diff <(rpc_cmd bdev_get_bdevs -b null63 | jq --sort-keys '.[].assigned_rate_limits') <(
+diff <(rpc_cmd bdev_get_bdevs -b null32 | jq --sort-keys '.[].assigned_rate_limits') <(
 	jq --sort-keys <<- EOF
 		{
 			"rw_ios_per_sec": 4000,
@@ -322,7 +322,7 @@ NOT "$rootdir/scripts/sma-client.py" <<- EOF
 EOF
 # Values remain unchanged
-diff <(rpc_cmd bdev_get_bdevs -b null63 | jq --sort-keys '.[].assigned_rate_limits') <(
+diff <(rpc_cmd bdev_get_bdevs -b null32 | jq --sort-keys '.[].assigned_rate_limits') <(
 	jq --sort-keys <<- EOF
 		{
 			"rw_ios_per_sec": 4000,