rpc: Rename stop_nbd_disk to nbd_stop_disk

Change-Id: I235460f445d7bc1dd03eaeaf794016ca808e5ebc
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468583
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Pawel Kaminski 2019-09-17 06:47:41 -04:00 committed by Ben Walker
parent 0a993323f9
commit d242f5a041
10 changed files with 57 additions and 54 deletions

View File

@@ -266,17 +266,17 @@ Example command
This will expose an SPDK bdev `Malloc0` under the `/dev/nbd0` block device.

-To remove NBD device user should use `stop_nbd_disk` RPC command.
+To remove NBD device user should use `nbd_stop_disk` RPC command.

Example command

-`rpc.py stop_nbd_disk /dev/nbd0`
+`rpc.py nbd_stop_disk /dev/nbd0`

To display full or specified nbd device list user should use `get_nbd_disks` RPC command.

Example command

-`rpc.py stop_nbd_disk -n /dev/nbd0`
+`rpc.py nbd_stop_disk -n /dev/nbd0`

## Creating a GPT partition table using NBD {#bdev_ug_gpt_create_part}
@@ -295,7 +295,7 @@ parted -s /dev/nbd0 mkpart MyPartition '0%' '50%'
sgdisk -t 1:7c5222bd-8f5d-4087-9c00-bf9843c7b58c /dev/nbd0
# Stop the NBD device (stop exporting /dev/nbd0).
-rpc.py stop_nbd_disk /dev/nbd0
+rpc.py nbd_stop_disk /dev/nbd0
# Now Nvme0n1 is configured with a GPT partition table, and
# the first partition will be automatically exposed as
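The documented flow above can also be driven straight from Python instead of rpc.py. A minimal sketch, assuming SPDK's scripts/ directory is on the import path and a target app is listening on the default /var/tmp/spdk.sock socket (both assumptions, not part of this change):

```python
# Sketch: export bdev Malloc0 over NBD, then tear the export down, using the
# renamed RPC wrappers (equivalent to `rpc.py nbd_start_disk Malloc0 /dev/nbd0`
# followed by `rpc.py nbd_stop_disk /dev/nbd0`).
import rpc.nbd
from rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock')  # assumed default RPC socket

rpc.nbd.nbd_start_disk(client, bdev_name='Malloc0', nbd_device='/dev/nbd0')
# ... use /dev/nbd0 here, e.g. the parted/sgdisk steps shown above ...
rpc.nbd.nbd_stop_disk(client, nbd_device='/dev/nbd0')
```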

View File

@@ -245,7 +245,7 @@ Example response:
    "delete_ip_address",
    "add_ip_address",
    "get_nbd_disks",
-    "stop_nbd_disk",
+    "nbd_stop_disk",
    "nbd_start_disk",
    "get_log_flags",
    "clear_log_flag",
@@ -5559,7 +5559,7 @@ Example response:
}
~~~

-## stop_nbd_disk {#rpc_stop_nbd_disk}
+## nbd_stop_disk {#rpc_nbd_stop_disk}

Stop one NBD disk which is based on SPDK bdev.
@@ -5579,7 +5579,7 @@ Example request:
    "nbd_device": "/dev/nbd1",
  },
  "jsonrpc": "2.0",
-  "method": "stop_nbd_disk",
+  "method": "nbd_stop_disk",
  "id": 1
}
~~~
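On success this method, like SPDK's other stop/delete RPCs, is expected to answer with a boolean result. A hedged sketch of the same request issued through the low-level Python client, which wraps the JSON-RPC 2.0 envelope shown above:

```python
# Sketch: issue the renamed method at the raw JSON-RPC layer.
# client.call() builds the {"jsonrpc": "2.0", "method": ..., "params": ...}
# envelope and returns the response's "result" member.
from rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock')  # assumed default socket path
result = client.call('nbd_stop_disk', {'nbd_device': '/dev/nbd1'})
print(result)  # expected True on success (assumption based on sibling RPCs)
```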

View File

@@ -234,18 +234,18 @@ invalid:
SPDK_RPC_REGISTER("nbd_start_disk", spdk_rpc_nbd_start_disk, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(nbd_start_disk, start_nbd_disk)
-struct rpc_stop_nbd_disk {
+struct rpc_nbd_stop_disk {
        char *nbd_device;
};
static void
-free_rpc_stop_nbd_disk(struct rpc_stop_nbd_disk *req)
+free_rpc_nbd_stop_disk(struct rpc_nbd_stop_disk *req)
{
        free(req->nbd_device);
}
-static const struct spdk_json_object_decoder rpc_stop_nbd_disk_decoders[] = {
-        {"nbd_device", offsetof(struct rpc_stop_nbd_disk, nbd_device), spdk_json_decode_string},
+static const struct spdk_json_object_decoder rpc_nbd_stop_disk_decoders[] = {
+        {"nbd_device", offsetof(struct rpc_nbd_stop_disk, nbd_device), spdk_json_decode_string},
};
struct nbd_disconnect_arg {
@@ -272,17 +272,17 @@ nbd_disconnect_thread(void *arg)
}
static void
-spdk_rpc_stop_nbd_disk(struct spdk_jsonrpc_request *request,
+spdk_rpc_nbd_stop_disk(struct spdk_jsonrpc_request *request,
                       const struct spdk_json_val *params)
{
-        struct rpc_stop_nbd_disk req = {};
+        struct rpc_nbd_stop_disk req = {};
        struct spdk_nbd_disk *nbd;
        pthread_t tid;
        struct nbd_disconnect_arg *thd_arg = NULL;
        int rc;
-        if (spdk_json_decode_object(params, rpc_stop_nbd_disk_decoders,
-                                    SPDK_COUNTOF(rpc_stop_nbd_disk_decoders),
+        if (spdk_json_decode_object(params, rpc_nbd_stop_disk_decoders,
+                                    SPDK_COUNTOF(rpc_nbd_stop_disk_decoders),
                                    &req)) {
                SPDK_ERRLOG("spdk_json_decode_object failed\n");
                spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
@@ -335,10 +335,11 @@ spdk_rpc_stop_nbd_disk(struct spdk_jsonrpc_request *request,
        }
out:
-        free_rpc_stop_nbd_disk(&req);
+        free_rpc_nbd_stop_disk(&req);
}
-SPDK_RPC_REGISTER("stop_nbd_disk", spdk_rpc_stop_nbd_disk, SPDK_RPC_RUNTIME)
+SPDK_RPC_REGISTER("nbd_stop_disk", spdk_rpc_nbd_stop_disk, SPDK_RPC_RUNTIME)
+SPDK_RPC_REGISTER_ALIAS_DEPRECATED(nbd_stop_disk, stop_nbd_disk)
static void
spdk_rpc_dump_nbd_info(struct spdk_json_write_ctx *w,
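Note what SPDK_RPC_REGISTER_ALIAS_DEPRECATED buys here: the handler stays reachable under the old name while callers migrate. A hedged illustration from the client side (the exact server-side deprecation logging is not shown in this diff):

```python
# Sketch: after this commit either spelling reaches the same C handler.
# Use one or the other; calling both in sequence would fail the second time,
# since the device is already stopped.
from rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock')  # assumed default socket path
client.call('nbd_stop_disk', {'nbd_device': '/dev/nbd0'})    # preferred new name
# client.call('stop_nbd_disk', {'nbd_device': '/dev/nbd0'})  # deprecated alias, still accepted
```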

View File

@@ -1472,13 +1472,14 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
    p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.', nargs='?')
    p.set_defaults(func=nbd_start_disk)
-    def stop_nbd_disk(args):
-        rpc.nbd.stop_nbd_disk(args.client,
+    def nbd_stop_disk(args):
+        rpc.nbd.nbd_stop_disk(args.client,
                              nbd_device=args.nbd_device)
-    p = subparsers.add_parser('stop_nbd_disk', help='Stop a nbd disk')
+    p = subparsers.add_parser('nbd_stop_disk', aliases=['stop_nbd_disk'],
+                              help='Stop a nbd disk')
    p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
-    p.set_defaults(func=stop_nbd_disk)
+    p.set_defaults(func=nbd_stop_disk)
    def get_nbd_disks(args):
        print_dict(rpc.nbd.get_nbd_disks(args.client,

View File

@@ -11,9 +11,10 @@ def nbd_start_disk(client, bdev_name, nbd_device):
    return client.call('nbd_start_disk', params)
-def stop_nbd_disk(client, nbd_device):
+@deprecated_alias('stop_nbd_disk')
+def nbd_stop_disk(client, nbd_device):
    params = {'nbd_device': nbd_device}
-    return client.call('stop_nbd_disk', params)
+    return client.call('nbd_stop_disk', params)
def get_nbd_disks(client, nbd_device=None):
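The `@deprecated_alias` decorator applied above comes from elsewhere in the rpc package. A minimal hypothetical sketch of the idea (not SPDK's actual implementation, which also ties into rpc.py's command dispatch):

```python
import functools
import warnings

def deprecated_alias(alias):
    """Hypothetical sketch: keep an old RPC wrapper name alive with a warning."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        def aliased(*args, **kwargs):
            # Warn whenever a caller reaches the wrapper through the old name.
            warnings.warn("%s is deprecated, use %s instead" % (alias, func.__name__),
                          DeprecationWarning)
            return func(*args, **kwargs)

        # Expose the shim so module consumers can bind it under the old name.
        wrapper.deprecated_aliases = {alias: aliased}
        return wrapper
    return decorator
```

With something like this, `nbd_stop_disk` itself runs unchanged, while lookups of `stop_nbd_disk` can be routed through the warning shim.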

View File

@@ -46,7 +46,7 @@ function nbd_stop_disks() {
        local i
        for i in ${nbd_list[@]}; do
-                $rootdir/scripts/rpc.py -s $rpc_server stop_nbd_disk $i
+                $rootdir/scripts/rpc.py -s $rpc_server nbd_stop_disk $i
                waitfornbd_exit $(basename $i)
        done
}

View File

@@ -62,7 +62,7 @@ $rpc_py save_config > $testdir/config/ftl.json
dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync
# Calculate checksum of the data written
dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > $testdir/testfile.md5
-$rpc_py stop_nbd_disk /dev/nbd0
+$rpc_py nbd_stop_disk /dev/nbd0
# Force kill bdev service (dirty shutdown) and start it again
kill -9 $svcpid

View File

@@ -116,7 +116,7 @@ def clear_iscsi_subsystem(args, iscsi_config):
def get_nbd_destroy_method(nbd):
-    delete_method_map = {'nbd_start_disk': "stop_nbd_disk"
+    delete_method_map = {'nbd_start_disk': "nbd_stop_disk"
                         }
    return delete_method_map[nbd['method']]

View File

@@ -174,9 +174,9 @@ class Commands_Rpc(object):
        output, rc = self.rpc.nbd_start_disk(bdev_name, nbd_name)
        return rc
-    def stop_nbd_disk(self, nbd_name):
-        print("INFO: RPC COMMAND stop_nbd_disk")
-        output, rc = self.rpc.stop_nbd_disk(nbd_name)
+    def nbd_stop_disk(self, nbd_name):
+        print("INFO: RPC COMMAND nbd_stop_disk")
+        output, rc = self.rpc.nbd_stop_disk(nbd_name)
        return rc
    def bdev_lvol_get_lvstores(self, name=None):

View File

@@ -39,7 +39,7 @@ current_fio_pid = -1
#
# Tests with thin provisioned lvol bdevs, snapshots and clones are using nbd devices.
# Before writing/reading to lvol bdev, bdev is installed with rpc nbd_start_disk.
-# After finishing writing/reading, rpc stop_nbd_disk is used.
+# After finishing writing/reading, rpc nbd_stop_disk is used.
def is_process_alive(pid):
@@ -1525,7 +1525,7 @@ class TestCases(object):
        if free_clusters_third_fio != 0:
            fail_count += 1
-        fail_count += self.c.stop_nbd_disk(nbd_name)
+        fail_count += self.c.nbd_stop_disk(nbd_name)
        # destroy thin provisioned lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        lvs = self.c.bdev_lvol_get_lvstores(self.lvs_name)[0]
@@ -1586,8 +1586,8 @@ class TestCases(object):
        # and check if they return zeroes
        fail_count += self.run_fio_test(nbd_name1, 0, size, "read", "0x00")
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        # destroy thin provisioned lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev0['name'])
        fail_count += self.c.bdev_lvol_delete(lvol_bdev1['name'])
@@ -1630,7 +1630,7 @@ class TestCases(object):
        # on the whole lvol bdev perform write operation with verification
        fail_count += self.run_fio_test(nbd_name, 0, size, "write", "0xcc")
-        fail_count += self.c.stop_nbd_disk(nbd_name)
+        fail_count += self.c.nbd_stop_disk(nbd_name)
        # destroy thin provisioned lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        # destroy lvol store
@@ -1667,7 +1667,7 @@ class TestCases(object):
        nbd_name = "/dev/nbd0"
        fail_count += self.c.nbd_start_disk(uuid_bdev, nbd_name)
        fail_count += self.run_fio_test(nbd_name, 0, size*MEGABYTE, "write", "0xcc", 0)
-        fail_count += self.c.stop_nbd_disk(nbd_name)
+        fail_count += self.c.nbd_stop_disk(nbd_name)
        # Save number of free clusters for lvs
        lvs = self.c.bdev_lvol_get_lvstores()[0]
        free_clusters_start = int(lvs['free_clusters'])
@@ -1691,7 +1691,7 @@ class TestCases(object):
        fail_count += self.c.nbd_start_disk(uuid_bdev, nbd_name)
        fail_count += self.run_fio_test(nbd_name, int(lbd_size * MEGABYTE / 2),
                                        int(lbd_size * MEGABYTE / 2), "write", "0xcc", 0)
-        fail_count += self.c.stop_nbd_disk(nbd_name)
+        fail_count += self.c.nbd_stop_disk(nbd_name)
        # Check if free clusters on lvs equals to zero
        lvs = self.c.bdev_lvol_get_lvstores()[0]
        if int(lvs['free_clusters']) != 0:
@@ -1773,8 +1773,8 @@ class TestCases(object):
        offset = "75%"
        fail_count += self.run_fio_test(nbd_name0, offset, size, "read", "0x00")
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        # destroy thin provisioned lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev0['name'])
        fail_count += self.c.bdev_lvol_delete(lvol_bdev1['name'])
@@ -1831,8 +1831,8 @@ class TestCases(object):
        # check if operation didn't fail
        fail_count += self.run_fio_test(nbd_name1, 0, size, "write", "0xee")
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        # destroy thin provisioned lvol bdevs
        fail_count += self.c.bdev_lvol_delete(lvol_bdev0['name'])
        fail_count += self.c.bdev_lvol_delete(lvol_bdev1['name'])
@@ -2157,7 +2157,7 @@ class TestCases(object):
        # Check if filling snapshot of lvol bdev fails
        fail_count += self.run_fio_test(nbd_name0, 0, size, "write", "0xcc", 1)
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
        # Destroy lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        # Destroy snapshot
@@ -2230,7 +2230,7 @@ class TestCases(object):
        # Compare thin provisioned lvol bdev with its snapshot and check if it fails
        fail_count += self.compare_two_disks(nbd_name[0], nbd_name[2], 1)
        for nbd in nbd_name:
-            fail_count += self.c.stop_nbd_disk(nbd)
+            fail_count += self.c.nbd_stop_disk(nbd)
        # Delete lvol bdevs
        fail_count += self.c.bdev_lvol_delete(lvol_bdev0['name'])
        fail_count += self.c.bdev_lvol_delete(lvol_bdev1['name'])
@@ -2290,7 +2290,7 @@ class TestCases(object):
        thread.join()
        # Check that write operation ended with success
        fail_count += thread.rv
-        fail_count += self.c.stop_nbd_disk(nbd_name)
+        fail_count += self.c.nbd_stop_disk(nbd_name)
        # Destroy lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        # Delete snapshot
@@ -2466,7 +2466,7 @@ class TestCases(object):
        fail_count += self.compare_two_disks(nbd_name[2], nbd_name[3], 0)
        for nbd in nbd_name:
-            fail_count += self.c.stop_nbd_disk(nbd)
+            fail_count += self.c.nbd_stop_disk(nbd)
        # Destroy lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        # Destroy two clones
@@ -2607,7 +2607,7 @@ class TestCases(object):
        fail_count += self.c.nbd_start_disk(lvol_bdev['name'], nbd_name)
        fill_size = size * MEGABYTE
        fail_count += self.run_fio_test(nbd_name, 0, fill_size, "write", "0xcc", 0)
-        self.c.stop_nbd_disk(nbd_name)
+        self.c.nbd_stop_disk(nbd_name)
        # Create snapshot of thick provisioned lvol bdev
        fail_count += self.c.bdev_lvol_snapshot(lvol_bdev['name'], snapshot_name)
@@ -2626,7 +2626,7 @@ class TestCases(object):
                                        MEGABYTE, "write", "0xdd", 0)
        fail_count += self.run_fio_test(nbd_name, second_fill * MEGABYTE,
                                        MEGABYTE, "write", "0xdd", 0)
-        self.c.stop_nbd_disk(nbd_name)
+        self.c.nbd_stop_disk(nbd_name)
        # Do inflate
        fail_count += self.c.bdev_lvol_inflate(lvol_clone['name'])
@@ -2649,7 +2649,7 @@ class TestCases(object):
        fail_count += self.run_fio_test(nbd_name, (second_fill + 1) * MEGABYTE,
                                        (size - second_fill - 1) * MEGABYTE,
                                        "read", "0xcc")
-        self.c.stop_nbd_disk(nbd_name)
+        self.c.nbd_stop_disk(nbd_name)
        # Destroy lvol bdev
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
@@ -2886,8 +2886,8 @@ class TestCases(object):
        fail_count += self.run_fio_test(nbd_name1, 0, size, "write", "0xcc", 0)
        # Stop nbd disks
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        # Destroy clone lvol bdev
        fail_count += self.c.bdev_lvol_delete(clone_bdev['name'])
        # Destroy lvol bdev
@@ -2956,7 +2956,7 @@ class TestCases(object):
            fail_count += 1
        # Delete snapshot - should succeed
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        fail_count += self.c.bdev_lvol_delete(snapshot_bdev['name'])
        # Check data consistency
@@ -2967,7 +2967,7 @@ class TestCases(object):
        fail_count += self.run_fio_test(nbd_name0, half_size, size-1, "read", "0xcc")
        # Destroy lvol bdev
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
        fail_count += self.c.bdev_lvol_delete(lvol_bdev['name'])
        # Destroy lvol store
@@ -3066,9 +3066,9 @@ class TestCases(object):
        # Verify snapshots
        fail_count += self.run_fio_test(nbd_name1, 0, size-1, "read", "0xcc")
-        fail_count += self.c.stop_nbd_disk(nbd_name1)
+        fail_count += self.c.nbd_stop_disk(nbd_name1)
        fail_count += self.run_fio_test(nbd_name2, second_part, size-second_part, "read", "0xcc")
-        fail_count += self.c.stop_nbd_disk(nbd_name2)
+        fail_count += self.c.nbd_stop_disk(nbd_name2)
        # Delete snapshot - should succeed
        fail_count += self.c.bdev_lvol_delete(snapshot_bdev2['name'])
@@ -3084,7 +3084,7 @@ class TestCases(object):
            fail_count += 1
        fail_count += self.run_fio_test(nbd_name0, first_part, second_part-first_part, "read", "0xee")
        fail_count += self.run_fio_test(nbd_name0, second_part, size-second_part, "read", "0xdd")
-        fail_count += self.c.stop_nbd_disk(nbd_name0)
+        fail_count += self.c.nbd_stop_disk(nbd_name0)
        # Destroy snapshot
        fail_count += self.c.bdev_lvol_delete(snapshot_bdev['name'])