test: Shellcheck - correct rule: $/${} is unnecessary on arithmetic variables.

Correct shellcheck rule SC2004: $/${} is unnecessary on arithmetic variables.

Signed-off-by: Maciej Wawryk <maciejx.wawryk@intel.com>
Change-Id: Ibf2879360bc50cc058b0f4434a5777c53c0eeffb
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/473265
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Maciej Wawryk 2019-11-05 10:28:35 +01:00 committed by Jim Harris
parent 08a71404a2
commit 9a4a87b573
20 changed files with 74 additions and 74 deletions
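
For reference, ShellCheck's SC2004 warns that "$" or "${}" is redundant when a plain variable is dereferenced inside an arithmetic context such as $(( )) or (( )). A minimal before/after sketch (the variable name is hypothetical):

    count=4
    half=$(( $count / 2 ))   # flagged by SC2004: the "$" is unnecessary here
    half=$(( count / 2 ))    # preferred form inside arithmetic expansion

Command substitutions like $(nproc), positional parameters like $1, and braced array references such as ${iops_disks[$k]} still need their "$"/"${}", which is why those are left untouched in the hunks below.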

View File

@@ -240,7 +240,7 @@ if hash shellcheck 2>/dev/null; then
# go to: https://trello.com/c/29Z90j1W
# Error descriptions can also be found at: https://github.com/koalaman/shellcheck/wiki
# This SHCK_EXCLUDE list is out "to do" and we work to fix all of this errors.
SHCK_EXCLUDE="SC1083,SC2002,SC2004,\
SHCK_EXCLUDE="SC1083,SC2002,\
SC2010,SC2012,SC2016,SC2034,SC2045,SC2046,SC2068,SC2086,SC2089,SC2090,\
SC2097,SC2098,SC2119,SC2120,SC2121,SC2124,SC2126,SC2128,\
SC2129,SC2140,SC2142,SC2143,SC2154,SC2155,SC2162"
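
For context, ShellCheck consumes such a comma-separated list through its -e/--exclude option; a minimal sketch of the kind of invocation this list feeds into (the exact command line used by check_format.sh may differ):

    shellcheck --exclude="$SHCK_EXCLUDE" scripts/*.sh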

View File

@@ -20,7 +20,7 @@ function configure_performance() {
echo -n "Moving all interrupts off of core 0..."
count=$(($(nproc) / 4))
cpumask="e"
-for ((i=1; i<$count; i++)); do
+for ((i=1; i<count; i++)); do
if [ $((i % 8)) -eq 0 ]; then
cpumask=",$cpumask"
fi

View File

@@ -388,7 +388,7 @@ function configure_linux {
MEMLOCK_AMNT=$(ulimit -l)
if [ "$MEMLOCK_AMNT" != "unlimited" ] ; then
-MEMLOCK_MB=$(( $MEMLOCK_AMNT / 1024 ))
+MEMLOCK_MB=$(( MEMLOCK_AMNT / 1024 ))
echo ""
echo "Current user memlock limit: ${MEMLOCK_MB} MB"
echo ""
@@ -709,7 +709,7 @@ fi
if [ $(uname) = Linux ]; then
HUGEPGSZ=$(( $(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9') ))
-HUGEPGSZ_MB=$(( $HUGEPGSZ / 1024 ))
+HUGEPGSZ_MB=$(( HUGEPGSZ / 1024 ))
: ${NRHUGE=$(( (HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB ))}
if [ "$mode" == "config" ]; then

View File

@@ -25,9 +25,9 @@ function create_nv_cache_bdev() {
local chunk_size=$(get_chunk_size $ocssd_bdf)
# We need at least 2 bands worth of data + 1 block
-local size=$((2 * 4096 * $chunk_size * $num_punits + 1))
+local size=$((2 * 4096 * chunk_size * num_punits + 1))
# Round the size up to the nearest megabyte
-local size=$((($size + $bytes_to_mb) / $bytes_to_mb))
+local size=$(((size + bytes_to_mb) / bytes_to_mb))
# Create NVMe bdev on specified device and split it so that it has the desired size
local nvc_bdev=$($rootdir/scripts/rpc.py bdev_nvme_attach_controller -b $name -t PCIe -a $cache_bdf)

View File

@@ -33,10 +33,10 @@ restore_kill() {
trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
chunk_size=$(get_chunk_size $device)
-pu_count=$(($pu_end - $pu_start + 1))
+pu_count=$((pu_end - pu_start + 1))
# Write one band worth of data + one extra chunk
-data_size=$(($chunk_size * ($pu_count + 1)))
+data_size=$((chunk_size * (pu_count + 1)))
$rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
waitforlisten $svcpid

View File

@@ -42,7 +42,7 @@ $rootdir/app/spdk_tgt/spdk_tgt & svcpid=$!
waitforlisten $svcpid
if [ -n "$nv_cache" ]; then
-nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $(($pu_end - $pu_start + 1)))
+nvc_bdev=$(create_nv_cache_bdev nvc0 $device $nv_cache $((pu_end - pu_start + 1)))
fi
ftl_construct_args="bdev_ftl_create -b nvme0 -a $device -l ${pu_start}-${pu_end}"

View File

@@ -150,7 +150,7 @@ function start_vpp() {
counter=40
while [ $counter -gt 0 ] ; do
vppctl show version &> /dev/null && break
-counter=$(( $counter - 1 ))
+counter=$(( counter - 1 ))
sleep 0.5
done
xtrace_restore
@@ -178,8 +178,8 @@ function start_vpp() {
sleep 3
# SC1010: ping -M do - in this case do is an option not bash special word
# shellcheck disable=SC1010
-ping -c 1 $TARGET_IP -s $(( $MTU - 28 )) -M do
-vppctl ping $INITIATOR_IP repeat 1 size $(( $MTU - (28 + 8) )) verbose
+ping -c 1 $TARGET_IP -s $(( MTU - 28 )) -M do
+vppctl ping $INITIATOR_IP repeat 1 size $(( MTU - (28 + 8) )) verbose
}
function kill_vpp() {

View File

@@ -71,7 +71,7 @@ sleep 1
timing_enter discovery
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
-waitforiscsidevices $(( $NUM_LVS * $NUM_LVOL ))
+waitforiscsidevices $(( NUM_LVS * NUM_LVOL ))
timing_exit discovery
timing_enter fio

View File

@@ -56,7 +56,7 @@ ls_guid=$($rpc_py bdev_lvol_create_lvstore "Nvme0n1" "lvs0" -c 1048576)
# Assign even size for each lvol_bdev.
get_lvs_free_mb $ls_guid
-lvol_bdev_size=$(($free_mb / $CONNECTION_NUMBER))
+lvol_bdev_size=$((free_mb / CONNECTION_NUMBER))
for i in $(seq 1 $CONNECTION_NUMBER); do
$rpc_py bdev_lvol_create -u $ls_guid lbd_$i $lvol_bdev_size
done

View File

@@ -100,7 +100,7 @@ fi
$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
-for (( i=0; i < $DISKNO; i++ ))
+for (( i=0; i < DISKNO; i++ ))
do
$rpc_py iscsi_create_target_node Target${i} Target${i}_alias "${bdevs[i]}:0" "$PORTAL_TAG:$INITIATOR_TAG" 64 -d
done

View File

@@ -23,8 +23,8 @@ function run_fio() {
local end_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")
local end_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats")
-IOPS_RESULT=$(((end_io_count-start_io_count)/$run_time))
-BANDWIDTH_RESULT=$(((end_bytes_read-start_bytes_read)/$run_time))
+IOPS_RESULT=$(((end_io_count-start_io_count)/run_time))
+BANDWIDTH_RESULT=$(((end_bytes_read-start_bytes_read)/run_time))
}
function verify_qos_limits() {
@@ -84,19 +84,19 @@ trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTER
run_fio Malloc0
# Set IOPS/bandwidth limit to 50% of the actual unrestrained performance.
-IOPS_LIMIT=$(($IOPS_RESULT/2))
-BANDWIDTH_LIMIT=$(($BANDWIDTH_RESULT/2))
+IOPS_LIMIT=$((IOPS_RESULT/2))
+BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT/2))
# Set READ bandwidth limit to 50% of the RW bandwidth limit to be able
# to differentiate those two.
-READ_BANDWIDTH_LIMIT=$(($BANDWIDTH_LIMIT/2))
+READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT/2))
# Also round them down to nearest multiple of either 1000 IOPS or 1MB BW
# which are the minimal QoS granularities
-IOPS_LIMIT=$(($IOPS_LIMIT/1000*1000))
-BANDWIDTH_LIMIT_MB=$(($BANDWIDTH_LIMIT/1024/1024))
-BANDWIDTH_LIMIT=$(($BANDWIDTH_LIMIT_MB*1024*1024))
-READ_BANDWIDTH_LIMIT_MB=$(($READ_BANDWIDTH_LIMIT/1024/1024))
-READ_BANDWIDTH_LIMIT=$(($READ_BANDWIDTH_LIMIT_MB*1024*1024))
+IOPS_LIMIT=$((IOPS_LIMIT/1000*1000))
+BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT/1024/1024))
+BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB*1024*1024))
+READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT/1024/1024))
+READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB*1024*1024))
# Limit the I/O rate by RPC, then confirm the observed rate matches.
$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
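
To make the 50% cut and the rounding above concrete, a small worked example with hypothetical measured values:

    IOPS_RESULT=123456
    BANDWIDTH_RESULT=$(( 251 * 1024 * 1024 ))                  # 251 MiB/s measured
    IOPS_LIMIT=$(( IOPS_RESULT / 2 ))                          # 61728
    IOPS_LIMIT=$(( IOPS_LIMIT / 1000 * 1000 ))                 # 61000, nearest lower multiple of 1000 IOPS
    BANDWIDTH_LIMIT=$(( BANDWIDTH_RESULT / 2 ))                # 131596288 bytes/s
    BANDWIDTH_LIMIT_MB=$(( BANDWIDTH_LIMIT / 1024 / 1024 ))    # 125 MB
    BANDWIDTH_LIMIT=$(( BANDWIDTH_LIMIT_MB * 1024 * 1024 ))    # 131072000, rounded down to a whole MB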

View File

@@ -73,7 +73,7 @@ sleep 1
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
-waitforiscsidevices $(( $CONNECTION_NUMBER + 1 ))
+waitforiscsidevices $(( CONNECTION_NUMBER + 1 ))
trap 'iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT

View File

@@ -415,7 +415,7 @@ function json_config_clear() {
count=100
while [ $count -gt 0 ] ; do
$rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break
-count=$(( $count -1 ))
+count=$(( count -1 ))
sleep 0.1
done

View File

@@ -50,7 +50,7 @@ trap 'killprocess $example_pid; exit 1' SIGINT SIGTERM EXIT
i=0
while ! grep "Starting I/O" log.txt; do
[ $i -lt 20 ] || break
-i=$(($i+1))
+i=$((i+1))
sleep 1
done

View File

@@ -107,7 +107,7 @@ function get_disks_on_numa(){
for (( i=0; i<${#devs[@]}; i++ ))
do
if [ ${numas[$i]} = $numa_no ]; then
-disks_on_numa=$(($disks_on_numa+1))
+disks_on_numa=$((disks_on_numa+1))
fi
done
echo $disks_on_numa
@@ -124,18 +124,18 @@ function create_fio_config(){
local filename=""
local cores_numa=($(get_cores_numa_node "$5"))
-local disks_per_core=$(($disk_no/$no_cores))
-local disks_per_core_mod=$(($disk_no%$no_cores))
+local disks_per_core=$((disk_no/no_cores))
+local disks_per_core_mod=$((disk_no%no_cores))
# For kernel dirver, each disk will be alligned with all cpus on the same NUMA node
if [ "$plugin" != "nvme" ] && [ "$plugin" != "bdev" ]; then
-for (( i=0; i<$disk_no; i++ ))
+for (( i=0; i<disk_no; i++ ))
do
sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio
filename="/dev/${disks[$i]}"
sed -i -e "\$afilename=$filename" $BASE_DIR/config.fio
cpu_used=""
-for (( j=0; j<$no_cores; j++ ))
+for (( j=0; j<no_cores; j++ ))
do
core_numa=${cores_numa[$j]}
if [ "${disks_numa[$i]}" = "$core_numa" ]; then
@@ -146,13 +146,13 @@ function create_fio_config(){
echo "" >> $BASE_DIR/config.fio
done
else
-for (( i=0; i<$no_cores; i++ ))
+for (( i=0; i<no_cores; i++ ))
do
core_numa=${cores_numa[$i]}
total_disks_per_core=$disks_per_core
if [ "$disks_per_core_mod" -gt "0" ]; then
-total_disks_per_core=$(($disks_per_core+1))
-disks_per_core_mod=$(($disks_per_core_mod-1))
+total_disks_per_core=$((disks_per_core+1))
+disks_per_core_mod=$((disks_per_core_mod-1))
fi
if [ "$total_disks_per_core" = "0" ]; then
@@ -166,7 +166,7 @@ function create_fio_config(){
n=0 #counter of all disks
while [ "$m" -lt "$total_disks_per_core" ]; do
if [ ${disks_numa[$n]} = $core_numa ]; then
-m=$(($m+1))
+m=$((m+1))
if [ "$plugin" = "nvme" ]; then
filename='trtype=PCIe traddr='${disks[$n]//:/.}' ns=1'
elif [ "$plugin" = "bdev" ]; then
@@ -176,7 +176,7 @@ function create_fio_config(){
#Mark numa of n'th disk as "x" to mark it as claimed
disks_numa[$n]="x"
fi
-n=$(($n+1))
+n=$((n+1))
# If there is no more disks with numa node same as cpu numa node, switch to other numa node.
if [ $n -ge $total_disks ]; then
if [ "$core_numa" = "1" ]; then
@@ -225,37 +225,37 @@ function get_results(){
mean_lat_usec)
mean_lat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)")
mean_lat=${mean_lat%.*}
-echo $(( $mean_lat/100000 ))
+echo $(( mean_lat/100000 ))
;;
p99_lat_usec)
p99_lat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.000000\" * $reads_pct + .write.clat_ns.percentile.\"99.000000\" * $writes_pct)")
p99_lat=${p99_lat%.*}
-echo $(( $p99_lat/100000 ))
+echo $(( p99_lat/100000 ))
;;
p99_99_lat_usec)
p99_99_lat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.990000\" * $reads_pct + .write.clat_ns.percentile.\"99.990000\" * $writes_pct)")
p99_99_lat=${p99_99_lat%.*}
-echo $(( $p99_99_lat/100000 ))
+echo $(( p99_99_lat/100000 ))
;;
stdev_usec)
stdev=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)")
stdev=${stdev%.*}
-echo $(( $stdev/100000 ))
+echo $(( stdev/100000 ))
;;
mean_slat_usec)
mean_slat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)")
mean_slat=${mean_slat%.*}
-echo $(( $mean_slat/100000 ))
+echo $(( mean_slat/100000 ))
;;
mean_clat_usec)
mean_clat=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)")
mean_clat=${mean_clat%.*}
-echo $(( $mean_clat/100000 ))
+echo $(( mean_clat/100000 ))
;;
bw_Kibs)
bw=$(cat $NVME_FIO_RESULTS | jq -r ".jobs[] | (.read.bw + .write.bw)")
bw=${bw%.*}
-echo $(( $bw ))
+echo $(( bw ))
;;
esac
}
@@ -270,7 +270,7 @@ function get_bdevperf_results(){
bw_Kibs)
bw_MBs=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $4}')
bw_MBs=${bw_MBs%.*}
-echo $(( $bw_MBs * 1024 ))
+echo $(( bw_MBs * 1024 ))
;;
esac
}

View File

@@ -103,17 +103,17 @@ echo "run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workloa
printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $no_cores $RW $MIX >> $result_file
echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
#Run each workolad $REPEAT_NO times
-for (( j=0; j < $REPEAT_NO; j++ ))
+for (( j=0; j < REPEAT_NO; j++ ))
do
#Start with $DISKNO disks and remove 2 disks for each run to avoid preconditioning before each run.
-for (( k=$DISKNO; k >= 1; k-=2 ))
+for (( k=DISKNO; k >= 1; k-=2 ))
do
cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio
echo "" >> $BASE_DIR/config.fio
#The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
#Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
if [ "$PLUGIN" = "nvme" ] || [ "$PLUGIN" = "bdev" ] && [ "$NOIOSCALING" = false ]; then
-qd=$(( $IODEPTH * $k ))
+qd=$(( IODEPTH * k ))
else
qd=$IODEPTH
fi
@@ -161,17 +161,17 @@ do
done
done
#Write results to csv file
-for (( k=$DISKNO; k >= 1; k-=2 ))
+for (( k=DISKNO; k >= 1; k-=2 ))
do
-iops_disks[$k]=$((${iops_disks[$k]} / $REPEAT_NO))
+iops_disks[$k]=$((${iops_disks[$k]} / REPEAT_NO))
if [ $PLUGIN != "bdevperf" ]; then
-mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / $REPEAT_NO))
-p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} / $REPEAT_NO))
-p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} / $REPEAT_NO))
-stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} / $REPEAT_NO))
-mean_slat_disks_usec[$k]=$((${mean_slat_disks_usec[$k]} / $REPEAT_NO))
-mean_clat_disks_usec[$k]=$((${mean_clat_disks_usec[$k]} / $REPEAT_NO))
+mean_lat_disks_usec[$k]=$((${mean_lat_disks_usec[$k]} / REPEAT_NO))
+p99_lat_disks_usec[$k]=$((${p99_lat_disks_usec[$k]} / REPEAT_NO))
+p99_99_lat_disks_usec[$k]=$((${p99_99_lat_disks_usec[$k]} / REPEAT_NO))
+stdev_disks_usec[$k]=$((${stdev_disks_usec[$k]} / REPEAT_NO))
+mean_slat_disks_usec[$k]=$((${mean_slat_disks_usec[$k]} / REPEAT_NO))
+mean_clat_disks_usec[$k]=$((${mean_clat_disks_usec[$k]} / REPEAT_NO))
else
mean_lat_disks_usec[$k]=0
p99_lat_disks_usec[$k]=0
@@ -181,7 +181,7 @@ do
mean_clat_disks_usec[$k]=0
fi
-bw[$k]=$((${bw[$k]} / $REPEAT_NO))
+bw[$k]=$((${bw[$k]} / REPEAT_NO))
printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]}\
${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file

View File

@@ -109,13 +109,13 @@ function detect_rdma_nics()
function allocate_nic_ips()
{
-(( count=$NVMF_IP_LEAST_ADDR ))
+(( count=NVMF_IP_LEAST_ADDR ))
for nic_name in $(get_rdma_if_list); do
ip="$(get_ip_address $nic_name)"
if [ -z $ip ]; then
ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
ip link set $nic_name up
-(( count=$count+1 ))
+(( count=count+1 ))
fi
# dump configuration for debug log
ip addr show $nic_name

View File

@@ -655,7 +655,7 @@ function vm_setup()
for c in $cpu_list; do
# if range is detected - count how many cpus
if [[ $c =~ [0-9]+-[0-9]+ ]]; then
-val=$(($c-1))
+val=$((c-1))
val=${val#-}
else
val=1

View File

@@ -132,7 +132,7 @@ nest_lvol_bdevs=()
used_vms=""
# On each NVMe create one lvol store
-for (( i=0; i<$max_disks; i++ ));do
+for (( i=0; i<max_disks; i++ ));do
# Create base lvol store on NVMe
notice "Creating lvol store on device Nvme${i}n1"
@@ -150,7 +150,7 @@ for (( i=0; i<$max_disks; i++ ));do
nest_ls_guid=$($rpc_py bdev_lvol_create_lvstore $lb_name lvs_n_$i -c 4194304)
nest_lvol_stores+=("$nest_ls_guid")
-for (( j=0; j<$vm_count; j++)); do
+for (( j=0; j<vm_count; j++)); do
notice "Creating nested lvol bdev for VM $i on lvol store $nest_ls_guid"
free_mb=$(get_lvs_free_mb "$nest_ls_guid")
nest_size=$((free_mb / (vm_count-j) ))
@@ -160,7 +160,7 @@ for (( i=0; i<$max_disks; i++ ));do
fi
# Create base lvol bdevs
-for (( j=0; j<$vm_count; j++)); do
+for (( j=0; j<vm_count; j++)); do
notice "Creating lvol bdev for VM $i on lvol store $ls_guid"
free_mb=$(get_lvs_free_mb "$ls_guid")
size=$((free_mb / (vm_count-j) ))
@@ -175,7 +175,7 @@ $rpc_py bdev_lvol_get_lvstores
echo "$bdev_info"
# Set up VMs
-for (( i=0; i<$vm_count; i++)); do
+for (( i=0; i<vm_count; i++)); do
vm="vm_$i"
# Get all lvol bdevs associated with this VM number
@@ -255,7 +255,7 @@ sleep 2
notice "Cleaning up vhost - remove LUNs, controllers, lvol bdevs and lvol stores"
if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
-for (( i=0; i<$vm_count; i++)); do
+for (( i=0; i<vm_count; i++)); do
notice "Removing devices from vhost SCSI controller naa.0.$i"
for (( j=0; j<${#bdevs[@]}; j++)); do
$rpc_py vhost_scsi_controller_remove_target naa.0.$i $j
@ -265,7 +265,7 @@ if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
$rpc_py vhost_delete_controller naa.0.$i
done
elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
-for (( i=0; i<$vm_count; i++)); do
+for (( i=0; i<vm_count; i++)); do
for (( j=0; j<${#bdevs[@]}; j++)); do
notice "Removing vhost BLK controller naa.$j.$i"
$rpc_py vhost_delete_controller naa.$j.$i

View File

@@ -88,7 +88,7 @@ function cleanup_lvol_cfg()
function cleanup_split_cfg()
{
notice "Removing split vbdevs"
-for (( i=0; i<$max_disks; i++ ));do
+for (( i=0; i<max_disks; i++ ));do
$rpc_py bdev_split_delete Nvme${i}n1
done
}
@ -208,15 +208,15 @@ if [[ "$ctrl_type" == "kernel_vhost" ]]; then
trap 'vm_kill_all; sleep 1; cleanup_kernel_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
# Split disks using parted for kernel vhost
newline=$'\n'
-for (( i=0; i<$max_disks; i++ ));do
+for (( i=0; i<max_disks; i++ ));do
parted -s /dev/nvme${i}n1 mklabel msdos
parted -s /dev/nvme${i}n1 mkpart extended 2048s 100%
part_size=$((100/${splits[$i]})) # Split 100% of disk into roughly even parts
echo " Creating ${splits[$i]} partitions of relative disk size ${part_size}"
for p in $(seq 0 $((${splits[$i]} - 1))); do
-p_start=$(($p*$part_size))
-p_end=$(($p_start+$part_size))
+p_start=$((p*part_size))
+p_end=$((p_start+part_size))
parted -s /dev/nvme${i}n1 mkpart logical ${p_start}% ${p_end}%
done
done
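
The percentage math above uses integer division, so the partitions cover slightly less than the whole disk whenever 100 is not divisible by the split count. A worked example with a hypothetical (scalar) split count:

    splits=3
    part_size=$(( 100 / splits ))   # 33
    # partition 0: 0%-33%, partition 1: 33%-66%, partition 2: 66%-99% (last ~1% unused)
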
@@ -254,7 +254,7 @@ else
notice "Using split vbdevs"
trap 'cleanup_split_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
split_bdevs=()
-for (( i=0; i<$max_disks; i++ ));do
+for (( i=0; i<max_disks; i++ ));do
out=$($rpc_py bdev_split_create Nvme${i}n1 ${splits[$i]})
for s in $(seq 0 $((${splits[$i]}-1))); do
split_bdevs+=("Nvme${i}n1p${s}")
@@ -264,7 +264,7 @@ else
else
notice "Using logical volumes"
trap 'cleanup_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
-for (( i=0; i<$max_disks; i++ ));do
+for (( i=0; i<max_disks; i++ ));do
ls_guid=$($rpc_py bdev_lvol_create_lvstore Nvme${i}n1 lvs_$i --clear-method none)
lvol_stores+=("$ls_guid")
for (( j=0; j<${splits[$i]}; j++)); do
@@ -279,7 +279,7 @@ else
fi
# Prepare VMs and controllers
-for (( i=0; i<$vm_count; i++)); do
+for (( i=0; i<vm_count; i++)); do
vm="vm_$i"
setup_cmd="vm_setup --disk-type=$ctrl_type --force=$i --memory=$vm_memory"