app: drop -w parameter (wait for rpc init)

--wait-for-rpc can be used instead.

This parameter will eventually become obsolete,
so let's remove its short version now. The -w
shorthand will then be used exclusively for
"workload" in perf apps.

Change-Id: Ib4b7001d0e756349b05788278c894d622bc89790
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/421863
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Dariusz Stojaczyk 2018-08-10 12:20:25 +02:00 committed by Jim Harris
parent 9bd8d2b40e
commit aed8986d73
30 changed files with 34 additions and 34 deletions

View File

@ -99,8 +99,6 @@ static struct option g_cmdline_options[SPDK_APP_MAX_CMDLINE_OPTIONS + 1] = {
{"mem-size", required_argument, NULL, MEM_SIZE_OPT_IDX},
#define NO_PCI_OPT_IDX 'u'
{"no-pci", no_argument, NULL, NO_PCI_OPT_IDX},
#define WAIT_FOR_RPC_OPT_IDX 'w'
{"wait-for-rpc", no_argument, NULL, WAIT_FOR_RPC_OPT_IDX},
#define PCI_BLACKLIST_OPT_IDX 'B'
{"pci-blacklist", required_argument, NULL, PCI_BLACKLIST_OPT_IDX},
#define TRACEFLAG_OPT_IDX 'L'
@ -111,6 +109,8 @@ static struct option g_cmdline_options[SPDK_APP_MAX_CMDLINE_OPTIONS + 1] = {
{"pci-whitelist", required_argument, NULL, PCI_WHITELIST_OPT_IDX},
#define SILENCE_NOTICELOG_OPT_IDX 257
{"silence-noticelog", no_argument, NULL, SILENCE_NOTICELOG_OPT_IDX},
#define WAIT_FOR_RPC_OPT_IDX 258
{"wait-for-rpc", no_argument, NULL, WAIT_FOR_RPC_OPT_IDX},
{NULL, no_argument, NULL, 0}
};
@ -888,7 +888,7 @@ spdk_app_parse_args(int argc, char **argv, struct spdk_app_opts *opts,
/* TBD: Replace warning by failure when RPCs for startup are prepared. */
if (opts->config_file && opts->delay_subsystem_init) {
fprintf(stderr,
"WARNING: -w and config file are used at the same time. "
"WARNING: --wait-for-rpc and config file are used at the same time. "
"- Please be careful one options might overwrite others.\n");
}

View File

@ -32,7 +32,7 @@ echo "IP=$TARGET_IP" >> /usr/local/etc/its.conf
timing_enter start_iscsi_tgt
$ISCSI_APP -m 0x1 -w &
$ISCSI_APP -m 0x1 --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -63,7 +63,7 @@ fio_py="python $rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -15,7 +15,7 @@ rpc_py="python $rootdir/scripts/rpc.py"
timing_enter start_iscsi_tgt
$ISCSI_APP -w &
$ISCSI_APP --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -25,7 +25,7 @@ function remove_backends() {
timing_enter start_iscsi_tgt
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -16,7 +16,7 @@ timing_enter start_iscsi_tgt
# Start the iSCSI target without using stub
# Reason: Two SPDK processes will be started
$ISCSI_APP -m 0x2 -p 1 -s 512 -w &
$ISCSI_APP -m 0x2 -p 1 -s 512 --wait-for-rpc &
pid=$!
echo "iSCSI target launched. pid: $pid"
trap "killprocess $pid;exit 1" SIGINT SIGTERM EXIT

View File

@ -43,7 +43,7 @@ for ((i = 0; i < 2; i++)); do
rpc_addr="/var/tmp/spdk${i}.sock"
# TODO: run the different iSCSI instances on non-overlapping CPU masks
$ISCSI_APP -r $rpc_addr -s 1000 -i $i -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -r $rpc_addr -s 1000 -i $i -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -22,7 +22,7 @@ fio_py="python $rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -35,7 +35,7 @@ timing_enter multiconnection
timing_enter start_iscsi_tgt
# Start the iSCSI target without using stub.
$ISCSI_APP -w &
$ISCSI_APP --wait-for-rpc &
iscsipid=$!
echo "iSCSI target launched. pid: $iscsipid"
trap "remove_backends; iscsicleanup; killprocess $iscsipid; exit 1" SIGINT SIGTERM EXIT

View File

@ -31,7 +31,7 @@ function run_nvme_remote() {
# Start the iSCSI target without using stub
iscsi_rpc_addr="/var/tmp/spdk-iscsi.sock"
ISCSI_APP="$rootdir/app/iscsi_tgt/iscsi_tgt"
$ISCSI_APP -r "$iscsi_rpc_addr" -m 0x1 -p 0 -s 512 -w &
$ISCSI_APP -r "$iscsi_rpc_addr" -m 0x1 -p 0 -s 512 --wait-for-rpc &
iscsipid=$!
echo "iSCSI target launched. pid: $iscsipid"
trap "killprocess $iscsipid; killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
@ -64,7 +64,7 @@ timing_enter nvme_remote
# Start the NVMf target
NVMF_APP="$rootdir/app/nvmf_tgt/nvmf_tgt"
$NVMF_APP -m 0x2 -p 1 -s 512 -w &
$NVMF_APP -m 0x2 -p 1 -s 512 --wait-for-rpc &
nvmfpid=$!
echo "NVMf target launched. pid: $nvmfpid"
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -18,7 +18,7 @@ fio_py="python $rootdir/scripts/fio.py"
timing_enter iscsi_pmem
timing_enter start_iscsi_target
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -22,7 +22,7 @@ fio_py="python $rootdir/scripts/fio.py"
timing_enter start_iscsi_tgt
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK -w &
$ISCSI_APP -m $ISCSI_TEST_CORE_MASK --wait-for-rpc &
pid=$!
trap "killprocess $pid; rbd_cleanup; exit 1" SIGINT SIGTERM EXIT

View File

@ -21,7 +21,7 @@ fi
timing_enter start_iscsi_tgt
$ISCSI_APP -w &
$ISCSI_APP --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -22,7 +22,7 @@ rpc_config_py="python $testdir/rpc_config.py"
timing_enter start_iscsi_tgt
$ISCSI_APP -w &
$ISCSI_APP --wait-for-rpc &
pid=$!
echo "Process pid: $pid"

View File

@ -17,7 +17,7 @@ null_json_config=$JSON_DIR/null_json_config.json
function run_spdk_tgt() {
echo "Running spdk target"
$SPDK_BUILD_DIR/app/spdk_tgt/spdk_tgt -m 0x1 -p 0 -s 1024 -w &
$SPDK_BUILD_DIR/app/spdk_tgt/spdk_tgt -m 0x1 -p 0 -s 1024 --wait-for-rpc &
spdk_tgt_pid=$!
echo "Waiting for app to run..."
@ -36,7 +36,7 @@ function load_nvme() {
}
function run_initiator() {
$SPDK_BUILD_DIR/app/spdk_tgt/spdk_tgt -m 0x2 -p 0 -g -u -s 1024 -r /var/tmp/virtio.sock -w &
$SPDK_BUILD_DIR/app/spdk_tgt/spdk_tgt -m 0x2 -p 0 -g -u -s 1024 -r /var/tmp/virtio.sock --wait-for-rpc &
virtio_pid=$!
waitforlisten $virtio_pid /var/tmp/virtio.sock
}

View File

@ -27,7 +27,7 @@ fi
timing_enter discovery
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -23,7 +23,7 @@ timing_enter fs_test
for incapsule in 0 4096; do
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -22,7 +22,7 @@ fi
timing_enter fio
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -19,7 +19,7 @@ fi
timing_enter aer
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -22,7 +22,7 @@ fi
timing_enter bdevperf
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -25,7 +25,7 @@ fi
timing_enter fio
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -21,7 +21,7 @@ fi
timing_enter identify
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -22,7 +22,7 @@ fi
timing_enter perf
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w -i 0 &
$NVMF_APP -m 0xF --wait-for-rpc -i 0 &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -40,7 +40,7 @@ fi
timing_enter lvol_integrity
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
pid=$!
trap "disconnect_nvmf; killprocess $pid; exit 1" SIGINT SIGTERM EXIT

View File

@ -31,7 +31,7 @@ fi
timing_enter multiconnection
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
pid=$!
trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT

View File

@ -28,7 +28,7 @@ fi
timing_enter nvme_cli
timing_enter start_nvmf_tgt
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
nvmfpid=$!
trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT

View File

@ -19,7 +19,7 @@ fi
timing_enter rpc
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
pid=$!
trap "killprocess $pid; exit 1" SIGINT SIGTERM EXIT

View File

@ -22,7 +22,7 @@ fi
timing_enter shutdown
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -m 0xF -w &
$NVMF_APP -m 0xF --wait-for-rpc &
pid=$!
trap "killprocess $pid; nvmfcleanup; exit 1" SIGINT SIGTERM EXIT

View File

@ -93,7 +93,7 @@ function migration_tc2_configure_vhost()
notice "Running nvmf_tgt..."
mkdir -p $nvmf_dir
rm -f $nvmf_dir/*
$SPDK_BUILD_DIR/app/nvmf_tgt/nvmf_tgt -s 512 -m 0x4 -r $nvmf_dir/rpc.sock -w &
$SPDK_BUILD_DIR/app/nvmf_tgt/nvmf_tgt -s 512 -m 0x4 -r $nvmf_dir/rpc.sock --wait-for-rpc &
local nvmf_tgt_pid=$!
echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"

View File

@ -101,7 +101,7 @@ function host1_start_nvmf()
rm -rf $nvmf_dir/*
trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
$SPDK_BUILD_DIR/app/nvmf_tgt/nvmf_tgt -s 512 -m 0xF -r $nvmf_dir/nvmf_rpc.sock -w &
$SPDK_BUILD_DIR/app/nvmf_tgt/nvmf_tgt -s 512 -m 0xF -r $nvmf_dir/nvmf_rpc.sock --wait-for-rpc &
nvmf_tgt_pid=$!
echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/nvmf_rpc.sock"