sma: forbid deleting devices with attached volumes

It makes it possible for stateless clients to send CreateDevice /
DeleteDevice each time a volume is attached / detached.  Deleting a
device with attached volumes results in a FAILED_PRECONDITION error, which
a client can simply ignore.  The device will be deleted during the final
DeleteDevice call, once all volumes are detached.

We limit this behavior to device types that support AttachVolume /
DetachVolume methods, otherwise it would be impossible to delete such
devices.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I244b2b09455ec1430970c70f3fbb739cc9069754
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15670
Reviewed-by: Jing Yan <jing1.yan@intel.com>
Reviewed-by: Filip Szufnarowski <filip.szufnarowski@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Konrad Sztyber 2022-11-28 16:31:55 +01:00 committed by Tomasz Zawadzki
parent 5a3e64efe4
commit b7964a3c5a
7 changed files with 32 additions and 5 deletions

View File

@@ -172,7 +172,10 @@ service StorageManagementAgent {
// volumes (e.g. an NVMeoF subsystem).
rpc CreateDevice (CreateDeviceRequest)
returns (CreateDeviceResponse) {}
// Deletes a device
// Deletes a device. It is only allowed to delete a device with volumes still
// attached if that device doesn't support attaching volumes through
// AttachVolume (e.g. virtio-blk). In other cases, it is forbidden and
// FAILED_PRECONDITION status will be returned.
rpc DeleteDevice (DeleteDeviceRequest)
returns (DeleteDeviceResponse) {}
// Attaches a volume to a specified device making it available through that

View File

@@ -12,10 +12,12 @@ class DeviceException(Exception):
class DeviceManager:
def __init__(self, name, protocol, client):
def __init__(self, name, protocol, client, allow_delete_volumes=False):
self._client = client
self.protocol = protocol
self.name = name
# Configures whether the device allows deleting a device with attached volumes
self.allow_delete_volumes = allow_delete_volumes
def init(self, config):
pass

View File

@@ -20,7 +20,7 @@ from .device import DeviceException, DeviceManager
class VhostBlkDeviceManager(DeviceManager):
def __init__(self, client):
super().__init__('vhost_blk', 'virtio_blk', client)
super().__init__('vhost_blk', 'virtio_blk', client, allow_delete_volumes=True)
def init(self, config):
self._buses = config.get('buses', [])

View File

@@ -97,8 +97,12 @@ class StorageManagementAgent(pb2_grpc.StorageManagementAgentServicer):
if device is None:
raise DeviceException(grpc.StatusCode.NOT_FOUND,
'Invalid device handle')
if not device.allow_delete_volumes and self._volume_mgr.has_volumes(request.handle):
raise DeviceException(grpc.StatusCode.FAILED_PRECONDITION,
'Device has attached volumes')
device.delete_device(request)
# Remove all volumes attached to that device
# Either there are no volumes attached to this device or we're allowed to delete it
# with volumes still attached
self._volume_mgr.disconnect_device_volumes(request.handle)
except DeviceException as ex:
context.set_details(ex.message)

View File

@@ -320,3 +320,9 @@ class VolumeManager:
volumes = [i for i, v in self._volumes.items() if v.device_handle == device_handle]
for volume_id in volumes:
self._disconnect_volume(volume_id)
@_locked
def has_volumes(self, device_handle):
    """Return True if at least one volume is attached to the given device."""
    # Scan the attached-volume table for any entry bound to this handle.
    return any(vol.device_handle == device_handle
               for vol in self._volumes.values())

View File

@@ -242,6 +242,7 @@ delete_device $device
device=$(create_device $uuid AES_CBC $key0 | jq -r '.handle')
verify_crypto_volume $localnqn $uuid
detach_volume $device $uuid
delete_device $device
# Try to create a device with incorrect volume crypto params, check that it fails and everything

View File

@@ -260,7 +260,15 @@ detach_volume $device_id $t1uuid
$rpc_py bdev_nvme_get_discovery_info | jq -r '.[].trid.trsvcid' | grep $t1dscport
$rpc_py bdev_nvme_get_discovery_info | jq -r '.[].trid.trsvcid' | grep $t2dscport1
# Delete the device and verify that this also causes the volumes to be disconnected
# Try to delete the device and verify that it fails if it has volumes attached to it
NOT delete_device $device_id
[[ $($rpc_py bdev_nvme_get_discovery_info | jq -r '. | length') -eq 2 ]]
$rpc_py bdev_nvme_get_discovery_info | jq -r '.[].trid.trsvcid' | grep $t1dscport
$rpc_py bdev_nvme_get_discovery_info | jq -r '.[].trid.trsvcid' | grep $t2dscport1
# After detaching the second volume, it should be possible to delete the device
detach_volume $device_id $t2uuid
delete_device $device_id
[[ $($rpc_py bdev_nvme_get_discovery_info | jq -r '. | length') -eq 0 ]]
@@ -299,6 +307,9 @@ $rpc_py nvmf_get_subsystems $localnqn | jq -r '.[].namespaces[].uuid' | grep $t2
$rpc_py nvmf_get_subsystems $localnqn | jq -r '.[].namespaces[].uuid' | grep $t2uuid2
# Reset the device
detach_volume $device_id $t1uuid
detach_volume $device_id $t2uuid
detach_volume $device_id $t2uuid2
delete_device $device_id
[[ $($rpc_py bdev_nvme_get_discovery_info | jq -r '. | length') -eq 0 ]]