nvmf: nvmf_subsystem_remove_host RPC now disconnects hosts
If a connected host matches the removed host's NQN, it is now disconnected.

Change-Id: I0bec29eda2dc220114b9197d4eb765899b9e1517
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4684
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Community-CI: Broadcom CI
parent c5840d77b3
commit 6723bd0c0f
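In practice the change is visible through the existing RPC pair; a usage sketch, using the example NQNs from the new test added below:

  # Disallow the host; with this change the target now also drops any live
  # connection whose host NQN matches.
  scripts/rpc.py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0

  # Re-adding the host permits it to reconnect.
  scripts/rpc.py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0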
lib/nvmf/nvmf_rpc.c

@@ -1474,6 +1474,7 @@ rpc_nvmf_subsystem_remove_ns(struct spdk_jsonrpc_request *request,
 SPDK_RPC_REGISTER("nvmf_subsystem_remove_ns", rpc_nvmf_subsystem_remove_ns, SPDK_RPC_RUNTIME)
 
 struct nvmf_rpc_host_ctx {
+	struct spdk_jsonrpc_request *request;
 	char *nqn;
 	char *host;
 	char *tgt_name;
@@ -1544,53 +1545,83 @@ rpc_nvmf_subsystem_add_host(struct spdk_jsonrpc_request *request,
 }
 SPDK_RPC_REGISTER("nvmf_subsystem_add_host", rpc_nvmf_subsystem_add_host, SPDK_RPC_RUNTIME)
 
+static void
+rpc_nvmf_subsystem_remove_host_done(void *_ctx, int status)
+{
+	struct nvmf_rpc_host_ctx *ctx = _ctx;
+	struct spdk_json_write_ctx *w;
+
+	w = spdk_jsonrpc_begin_result(ctx->request);
+	spdk_json_write_bool(w, true);
+	spdk_jsonrpc_end_result(ctx->request, w);
+	nvmf_rpc_host_ctx_free(ctx);
+	free(ctx);
+}
+
 static void
 rpc_nvmf_subsystem_remove_host(struct spdk_jsonrpc_request *request,
 			       const struct spdk_json_val *params)
 {
-	struct nvmf_rpc_host_ctx ctx = {};
+	struct nvmf_rpc_host_ctx *ctx;
 	struct spdk_nvmf_subsystem *subsystem;
 	struct spdk_nvmf_tgt *tgt;
-	struct spdk_json_write_ctx *w;
 	int rc;
 
-	if (spdk_json_decode_object(params, nvmf_rpc_subsystem_host_decoder,
-				    SPDK_COUNTOF(nvmf_rpc_subsystem_host_decoder),
-				    &ctx)) {
-		SPDK_ERRLOG("spdk_json_decode_object failed\n");
-		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
-		nvmf_rpc_host_ctx_free(&ctx);
+	ctx = calloc(1, sizeof(*ctx));
+	if (ctx == NULL) {
+		SPDK_ERRLOG("Unable to allocate context to perform RPC\n");
+		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, "Out of memory");
 		return;
 	}
 
-	tgt = spdk_nvmf_get_tgt(ctx.tgt_name);
+	ctx->request = request;
+
+	if (spdk_json_decode_object(params, nvmf_rpc_subsystem_host_decoder,
+				    SPDK_COUNTOF(nvmf_rpc_subsystem_host_decoder),
+				    ctx)) {
+		SPDK_ERRLOG("spdk_json_decode_object failed\n");
+		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
+		nvmf_rpc_host_ctx_free(ctx);
+		free(ctx);
+		return;
+	}
+
+	tgt = spdk_nvmf_get_tgt(ctx->tgt_name);
 	if (!tgt) {
 		SPDK_ERRLOG("Unable to find a target object.\n");
 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
 						 "Unable to find a target.");
-		nvmf_rpc_host_ctx_free(&ctx);
+		nvmf_rpc_host_ctx_free(ctx);
+		free(ctx);
 		return;
 	}
 
-	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, ctx.nqn);
+	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, ctx->nqn);
 	if (!subsystem) {
-		SPDK_ERRLOG("Unable to find subsystem with NQN %s\n", ctx.nqn);
+		SPDK_ERRLOG("Unable to find subsystem with NQN %s\n", ctx->nqn);
 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
-		nvmf_rpc_host_ctx_free(&ctx);
+		nvmf_rpc_host_ctx_free(ctx);
+		free(ctx);
 		return;
 	}
 
-	rc = spdk_nvmf_subsystem_remove_host(subsystem, ctx.host);
+	rc = spdk_nvmf_subsystem_remove_host(subsystem, ctx->host);
 	if (rc != 0) {
 		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, "Internal error");
-		nvmf_rpc_host_ctx_free(&ctx);
+		nvmf_rpc_host_ctx_free(ctx);
+		free(ctx);
 		return;
 	}
 
-	w = spdk_jsonrpc_begin_result(request);
-	spdk_json_write_bool(w, true);
-	spdk_jsonrpc_end_result(request, w);
-	nvmf_rpc_host_ctx_free(&ctx);
+	rc = spdk_nvmf_subsystem_disconnect_host(subsystem, ctx->host,
+						 rpc_nvmf_subsystem_remove_host_done,
+						 ctx);
+	if (rc != 0) {
+		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, "Internal error");
+		nvmf_rpc_host_ctx_free(ctx);
+		free(ctx);
+		return;
+	}
 }
 SPDK_RPC_REGISTER("nvmf_subsystem_remove_host", rpc_nvmf_subsystem_remove_host,
 		  SPDK_RPC_RUNTIME)
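On the wire, the RPC now answers only after the disconnect completes; a sketch of the exchange (parameter names follow the SPDK JSON-RPC conventions for this method, and the NQN values are examples):

  -> { "jsonrpc": "2.0", "method": "nvmf_subsystem_remove_host", "id": 1,
       "params": { "nqn": "nqn.2016-06.io.spdk:cnode0",
                   "host": "nqn.2016-06.io.spdk:host0" } }
  <- { "jsonrpc": "2.0", "id": 1, "result": true }

The `true` result is written from rpc_nvmf_subsystem_remove_host_done rather than inline, since spdk_nvmf_subsystem_disconnect_host completes asynchronously; that is why the context moves from the stack to a heap allocation that the callback frees.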
test/nvmf/common.sh

@@ -364,7 +364,8 @@ function gen_nvmf_target_json() {
 			"traddr": "$NVMF_FIRST_TARGET_IP",
 			"adrfam": "ipv4",
 			"trsvcid": "$NVMF_PORT",
-			"subnqn": "nqn.2016-06.io.spdk:cnode$subsystem"
+			"subnqn": "nqn.2016-06.io.spdk:cnode$subsystem",
+			"hostnqn": "nqn.2016-06.io.spdk:host$subsystem"
 		},
 		"method": "bdev_nvme_attach_controller"
 	}
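The added hostnqn feeds bdev_nvme_attach_controller, making the initiator present a specific host NQN — which is exactly what nvmf_subsystem_remove_host matches on when disconnecting. A hand-written equivalent of the generated entry, as an rpc.py sketch with example address values (-q sets the host NQN):

  scripts/rpc.py -s /var/tmp/bdevperf.sock bdev_nvme_attach_controller -b Nvme0 \
          -t tcp -a 10.0.0.2 -f ipv4 -s 4420 \
          -n nqn.2016-06.io.spdk:cnode0 -q nqn.2016-06.io.spdk:host0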
test/nvmf/nvmf.sh

@@ -17,6 +17,7 @@ run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
 run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"
 run_test "nvmf_discovery" test/nvmf/target/discovery.sh "${TEST_ARGS[@]}"
 run_test "nvmf_connect_disconnect" test/nvmf/target/connect_disconnect.sh "${TEST_ARGS[@]}"
+run_test "nvmf_host_management" test/nvmf/target/host_management.sh "${TEST_ARGS[@]}"
 if [ $SPDK_TEST_NVME_CLI -eq 1 ]; then
 	run_test "nvmf_nvme_cli" test/nvmf/target/nvme_cli.sh "${TEST_ARGS[@]}"
 fi
test/nvmf/target/host_management.sh (new executable file, 107 lines)

@@ -0,0 +1,107 @@
#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

rpc_py="$rootdir/scripts/rpc.py"

function starttarget() {
	# Start the target
	nvmfappstart -m 0x1E

	$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

	timing_enter create_subsystems
	# Create the subsystem and allow host0 to connect to it
	rm -rf $testdir/rpcs.txt
	cat <<- EOL >> $testdir/rpcs.txt
		bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
		nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -s SPDK0
		nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Malloc0
		nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
		nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	EOL
	$rpc_py < $testdir/rpcs.txt
	timing_exit create_subsystems
}

function stoptarget() {
	rm -f ./local-job0-0-verify.state
	rm -rf $testdir/bdevperf.conf
	rm -rf $testdir/rpcs.txt

	nvmftestfini
}

function waitforio() {
	# $1 = RPC socket
	if [ -z "$1" ]; then
		exit 1
	fi
	# $2 = bdev name
	if [ -z "$2" ]; then
		exit 1
	fi
	local ret=1
	local i
	for ((i = 10; i != 0; i--)); do
		read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
		# A few I/O will happen during initial examine. So wait until at least 100 I/O
		# have completed to know that bdevperf is really generating the I/O.
		if [ $read_io_count -ge 100 ]; then
			ret=0
			break
		fi
		sleep 0.25
	done
	return $ret
}

# Add a host, start I/O, remove host, re-add host
function nvmf_host_management() {
	starttarget

	# Run bdevperf
	$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "0") -q 64 -o 65536 -w verify -t 10 &
	perfpid=$!
	waitforlisten $perfpid /var/tmp/bdevperf.sock
	$rpc_py -s /var/tmp/bdevperf.sock framework_wait_init

	# Expand the trap to clean up bdevperf if something goes wrong
	trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT

	waitforio /var/tmp/bdevperf.sock Nvme0n1

	# Remove the host while bdevperf is still running, then re-add it quickly. The host
	# may attempt to reconnect.
	$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0

	sleep 1

	# TODO: Right now the NVMe-oF initiator will not correctly detect broken connections
	# and so it will never shut down. Just kill it.
	kill -9 $perfpid || true

	# Run bdevperf again to verify that the re-added host can connect
	$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "0") -q 64 -o 65536 -w verify -t 1 &
	perfpid=$!
	waitforlisten $perfpid /var/tmp/bdevperf.sock
	$rpc_py -s /var/tmp/bdevperf.sock framework_wait_init

	sleep 2

	stoptarget
}

nvmftestinit

run_test "nvmf_host_management" nvmf_host_management

trap - SIGINT SIGTERM EXIT
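The new test is wired into nvmf.sh above, but it can also be run standalone; a usage sketch, assuming the usual argument handling in test/nvmf/common.sh:

  sudo ./test/nvmf/target/host_management.sh --transport=tcp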