# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2017 Intel Corporation.
# All rights reserved.
def bdev_set_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None, bdev_auto_examine=None,
small_buf_pool_size=None, large_buf_pool_size=None):
"""Set parameters for the bdev subsystem.
Args:
bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
bdev_auto_examine: if set to false, the bdev layer will not examine every disk automatically (optional)
small_buf_pool_size: size of the small buffer (8KB buffers) pool (optional)
large_buf_pool_size: size of the large buffer (64KB buffers) pool (optional)
"""
params = {}
if bdev_io_pool_size:
params['bdev_io_pool_size'] = bdev_io_pool_size
if bdev_io_cache_size:
params['bdev_io_cache_size'] = bdev_io_cache_size
if bdev_auto_examine is not None:
params["bdev_auto_examine"] = bdev_auto_examine
if small_buf_pool_size:
params['small_buf_pool_size'] = small_buf_pool_size
if large_buf_pool_size:
params['large_buf_pool_size'] = large_buf_pool_size
return client.call('bdev_set_options', params)
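# Usage sketch (illustrative only, not part of the RPC wrappers): calling this
# helper against a running SPDK target over the default RPC socket. The socket
# path and option values below are assumptions for demonstration.
#
#   from spdk.rpc.client import JSONRPCClient
#
#   client = JSONRPCClient('/var/tmp/spdk.sock')
#   bdev_set_options(client, bdev_io_pool_size=65536, bdev_io_cache_size=256,
#                    bdev_auto_examine=False)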
def bdev_examine(client, name):
"""Examine a bdev manually. If the bdev does not exist yet when this RPC is called,
it will be examined when it is created
Args:
name: name of the bdev
"""
params = {
'name': name
}
return client.call('bdev_examine', params)
def bdev_wait_for_examine(client):
"""Report when all bdevs have been examined
"""
return client.call('bdev_wait_for_examine')
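# Usage sketch (illustrative): queue a manual examine for one bdev and then
# block until all outstanding examinations complete. 'Nvme0n1' is a placeholder
# and `client` is a connected JSONRPCClient as in the sketch above.
#
#   bdev_examine(client, name='Nvme0n1')
#   bdev_wait_for_examine(client)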
def bdev_compress_create(client, base_bdev_name, pm_path, lb_size):
"""Construct a compress virtual block device.
Args:
base_bdev_name: name of the underlying base bdev
pm_path: path to persistent memory
lb_size: logical block size for the compressed volume in bytes. Must be 4K or 512.
Returns:
Name of created virtual block device.
"""
params = {'base_bdev_name': base_bdev_name, 'pm_path': pm_path}
if lb_size:
params['lb_size'] = lb_size
return client.call('bdev_compress_create', params)
def bdev_compress_delete(client, name):
"""Delete compress virtual block device.
Args:
name: name of compress vbdev to delete
"""
params = {'name': name}
return client.call('bdev_compress_delete', params)
def bdev_compress_set_pmd(client, pmd):
"""Set pmd options for the bdev compress.
Args:
pmd: 0 = auto-select, 1 = QAT, 2 = ISAL, 3 = mlx5_pci
"""
params = {'pmd': pmd}
return client.call('bdev_compress_set_pmd', params)
def bdev_compress_get_orphans(client, name=None):
"""Get a list of comp bdevs that do not have a pmem file (aka orphaned).
Args:
name: comp bdev name to query (optional; if omitted, query all comp bdevs)
Returns:
List of comp bdev names.
"""
params = {}
if name:
params['name'] = name
return client.call('bdev_compress_get_orphans', params)
def bdev_crypto_create(client, base_bdev_name, name, crypto_pmd, key, cipher=None, key2=None):
"""Construct a crypto virtual block device.
Args:
base_bdev_name: name of the underlying base bdev
name: name for the crypto vbdev
crypto_pmd: name of the DPDK crypto driver to use
key: key
cipher: crypto algorithm to use (optional)
key2: second key, required by ciphers such as AES_XTS (optional)
Returns:
Name of created virtual block device.
"""
params = {'base_bdev_name': base_bdev_name, 'name': name, 'crypto_pmd': crypto_pmd, 'key': key}
if cipher:
params['cipher'] = cipher
if key2:
params['key2'] = key2
return client.call('bdev_crypto_create', params)
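# Usage sketch (illustrative): layer a crypto vbdev on top of an existing base
# bdev. The base bdev name, DPDK driver name and key below are placeholders;
# pick a driver and key length that match your crypto PMD.
#
#   bdev_crypto_create(client, base_bdev_name='Malloc0', name='CryptoMalloc0',
#                      crypto_pmd='crypto_aesni_mb',
#                      key='0123456789abcdef0123456789abcdef')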
def bdev_crypto_delete(client, name):
"""Delete crypto virtual block device.
Args:
name: name of crypto vbdev to delete
"""
params = {'name': name}
return client.call('bdev_crypto_delete', params)
def bdev_ocf_create(client, name, mode, cache_line_size, cache_bdev_name, core_bdev_name):
"""Add an OCF block device
Args:
name: name of constructed OCF bdev
mode: OCF cache mode: {'wb', 'wt', 'pt', 'wa', 'wi', 'wo'}
cache_line_size: OCF cache line size. The unit is KiB: {4, 8, 16, 32, 64}
cache_bdev_name: name of underlying cache bdev
core_bdev_name: name of underlying core bdev
Returns:
Name of created block device
"""
params = {
'name': name,
'mode': mode,
'cache_bdev_name': cache_bdev_name,
'core_bdev_name': core_bdev_name,
}
if cache_line_size:
params['cache_line_size'] = cache_line_size
return client.call('bdev_ocf_create', params)
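# Usage sketch (illustrative): build a write-through OCF cache bdev with a
# 64 KiB cache line, using one bdev as cache and another as core. All names
# are placeholders.
#
#   bdev_ocf_create(client, name='Cache0', mode='wt', cache_line_size=64,
#                   cache_bdev_name='Malloc0', core_bdev_name='Nvme0n1')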
def bdev_ocf_delete(client, name):
"""Delete an OCF device
Args:
name: name of OCF bdev
"""
params = {'name': name}
return client.call('bdev_ocf_delete', params)
def bdev_ocf_get_stats(client, name):
"""Get statistics of chosen OCF block device
Args:
name: name of OCF bdev
Returns:
Statistics as json object
"""
params = {'name': name}
return client.call('bdev_ocf_get_stats', params)
def bdev_ocf_get_bdevs(client, name=None):
"""Get list of OCF devices including unregistered ones
Args:
name: name of OCF vbdev or name of cache device or name of core device (optional)
Returns:
Array of OCF devices with their current status
"""
params = None
if name:
params = {'name': name}
return client.call('bdev_ocf_get_bdevs', params)
def bdev_ocf_set_cache_mode(client, name, mode):
"""Set cache mode of OCF block device
Args:
name: name of OCF bdev
mode: OCF cache mode: {'wb', 'wt', 'pt', 'wa', 'wi', 'wo'}
Returns:
New cache mode name
"""
params = {
'name': name,
'mode': mode,
}
return client.call('bdev_ocf_set_cache_mode', params)
def bdev_ocf_set_seqcutoff(client, name, policy, threshold, promotion_count):
"""Set sequential cutoff parameters on all cores for the given OCF cache device
Args:
name: Name of OCF cache bdev
policy: Sequential cutoff policy
threshold: Activation threshold [KiB] (optional)
promotion_count: Promotion request count (optional)
"""
params = {
'name': name,
'policy': policy,
}
if threshold:
params['threshold'] = threshold
if promotion_count:
params['promotion_count'] = promotion_count
return client.call('bdev_ocf_set_seqcutoff', params)
def bdev_ocf_flush_start(client, name):
"""Start flushing OCF cache device
Args:
name: name of OCF bdev
"""
params = {
'name': name,
}
return client.call('bdev_ocf_flush_start', params)
def bdev_ocf_flush_status(client, name):
"""Get flush status of OCF cache device
Args:
name: name of OCF bdev
Returns:
Flush status
"""
params = {
'name': name,
}
return client.call('bdev_ocf_flush_status', params)
def bdev_malloc_create(client, num_blocks, block_size, name=None, uuid=None, optimal_io_boundary=None,
md_size=None, md_interleave=None, dif_type=None, dif_is_head_of_md=None):
"""Construct a malloc block device.
Args:
num_blocks: size of block device in blocks
block_size: Data block size of device; must be a power of 2 and at least 512
name: name of block device (optional)
uuid: UUID of block device (optional)
optimal_io_boundary: Split on optimal IO boundary, in number of blocks, default 0 (disabled, optional)
md_size: metadata size of device (0, 8, 16, 32, 64, or 128), default 0 (optional)
md_interleave: metadata location, interleaved if set, and separated if omitted (optional)
dif_type: protection information type (optional)
dif_is_head_of_md: protection information is in the first 8 bytes of metadata (optional)
Returns:
Name of created block device.
"""
params = {'num_blocks': num_blocks, 'block_size': block_size}
if name:
params['name'] = name
if uuid:
params['uuid'] = uuid
if optimal_io_boundary:
params['optimal_io_boundary'] = optimal_io_boundary
if md_size:
params['md_size'] = md_size
if md_interleave:
params['md_interleave'] = md_interleave
if dif_type:
params['dif_type'] = dif_type
if dif_is_head_of_md:
params['dif_is_head_of_md'] = dif_is_head_of_md
return client.call('bdev_malloc_create', params)
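# Usage sketch (illustrative): create a 64 MiB malloc bdev
# (16384 blocks * 4096-byte block size); the name is a placeholder.
#
#   bdev_malloc_create(client, num_blocks=16384, block_size=4096, name='Malloc0')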
def bdev_malloc_delete(client, name):
"""Delete malloc block device.
Args:
name: name of malloc bdev to delete
"""
params = {'name': name}
return client.call('bdev_malloc_delete', params)
def bdev_null_create(client, num_blocks, block_size, name, uuid=None, md_size=None,
dif_type=None, dif_is_head_of_md=None):
"""Construct a null block device.
Args:
num_blocks: size of block device in blocks
block_size: block size of device; data part size must be a power of 2 and at least 512
name: name of block device
uuid: UUID of block device (optional)
md_size: metadata size of device (optional)
dif_type: protection information type (optional)
dif_is_head_of_md: protection information is in the first 8 bytes of metadata (optional)
Returns:
Name of created block device.
"""
params = {'name': name, 'num_blocks': num_blocks,
'block_size': block_size}
if uuid:
params['uuid'] = uuid
if md_size:
params['md_size'] = md_size
if dif_type:
params['dif_type'] = dif_type
if dif_is_head_of_md:
params['dif_is_head_of_md'] = dif_is_head_of_md
return client.call('bdev_null_create', params)
def bdev_null_delete(client, name):
"""Remove null bdev from the system.
Args:
name: name of null bdev to delete
"""
params = {'name': name}
return client.call('bdev_null_delete', params)
def bdev_null_resize(client, name, new_size):
"""Resize null bdev in the system.
Args:
name: name of null bdev to resize
new_size: new bdev size of resize operation. The unit is MiB
"""
params = {
'name': name,
'new_size': new_size,
}
return client.call('bdev_null_resize', params)
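# Usage sketch (illustrative): create a 128 MiB null bdev (262144 * 512-byte
# blocks) and later grow it to 256 MiB; the name is a placeholder.
#
#   bdev_null_create(client, num_blocks=262144, block_size=512, name='Null0')
#   bdev_null_resize(client, name='Null0', new_size=256)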
def bdev_raid_get_bdevs(client, category):
"""Get list of raid bdevs based on category
Args:
category: one of 'all', 'online', 'configuring' or 'offline'
Returns:
List of raid bdev details
"""
params = {'category': category}
return client.call('bdev_raid_get_bdevs', params)
def bdev_raid_create(client, name, raid_level, base_bdevs, strip_size=None, strip_size_kb=None):
"""Create raid bdev. Either strip size arg will work but one is required.
Args:
name: user defined raid bdev name
strip_size (deprecated): strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
strip_size_kb: strip size of raid bdev in KB, supported values like 8, 16, 32, 64, 128, 256, etc
raid_level: raid level of raid bdev, supported values 0
base_bdevs: Space separated names of Nvme bdevs in double quotes, like "Nvme0n1 Nvme1n1 Nvme2n1"
Returns:
None
"""
params = {'name': name, 'raid_level': raid_level, 'base_bdevs': base_bdevs}
if strip_size:
params['strip_size'] = strip_size
if strip_size_kb:
params['strip_size_kb'] = strip_size_kb
return client.call('bdev_raid_create', params)
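# Usage sketch (illustrative): assemble a striped raid bdev from two existing
# bdevs with a 64 KiB strip size. The names and the raid_level value follow the
# docstring above and are placeholders.
#
#   bdev_raid_create(client, name='Raid0', raid_level=0,
#                    base_bdevs='Malloc0 Malloc1', strip_size_kb=64)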
def bdev_raid_delete(client, name):
"""Delete raid bdev
Args:
name: raid bdev name
Returns:
None
"""
params = {'name': name}
return client.call('bdev_raid_delete', params)
def bdev_aio_create(client, filename, name, block_size=None, readonly=False):
"""Construct a Linux AIO block device.
Args:
filename: path to device or file (ex: /dev/sda)
name: name of block device
block_size: block size of device (optional; autodetected if omitted)
readonly: set aio bdev as read-only
Returns:
Name of created block device.
"""
params = {'name': name,
'filename': filename}
if block_size:
params['block_size'] = block_size
if readonly:
params['readonly'] = readonly
return client.call('bdev_aio_create', params)
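# Usage sketch (illustrative): expose a kernel block device through Linux AIO;
# the device path and name are placeholders.
#
#   bdev_aio_create(client, filename='/dev/sdb', name='Aio0', block_size=512)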
def bdev_aio_rescan(client, name):
"""Rescan a Linux AIO block device.
Args:
name: name of aio bdev to rescan
"""
params = {'name': name}
return client.call('bdev_aio_rescan', params)
def bdev_aio_delete(client, name):
"""Remove aio bdev from the system.
Args:
name: name of aio bdev to delete
"""
params = {'name': name}
return client.call('bdev_aio_delete', params)
def bdev_uring_create(client, filename, name, block_size=None):
"""Create a bdev with Linux io_uring backend.
Args:
filename: path to device or file (ex: /dev/nvme0n1)
name: name of bdev
block_size: block size of device (optional; autodetected if omitted)
Returns:
Name of created bdev.
"""
params = {'name': name,
'filename': filename}
if block_size:
params['block_size'] = block_size
return client.call('bdev_uring_create', params)
def bdev_uring_delete(client, name):
"""Delete a uring bdev.
Args:
name: name of uring bdev to delete
"""
params = {'name': name}
return client.call('bdev_uring_delete', params)
def bdev_xnvme_create(client, filename, name, io_mechanism):
"""Create a bdev with xNVMe backend.
Args:
filename: path to device or file (ex: /dev/nvme0n1)
name: name of xNVMe bdev to create
io_mechanism: I/O mechanism to use (ex: io_uring, io_uring_cmd, etc.)
Returns:
Name of created bdev.
"""
params = {'name': name,
'filename': filename,
'io_mechanism': io_mechanism}
return client.call('bdev_xnvme_create', params)
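# Usage sketch (illustrative): expose an NVMe namespace through the xNVMe
# backend using the io_uring I/O mechanism; path and name are placeholders.
#
#   bdev_xnvme_create(client, filename='/dev/nvme0n1', name='Xnvme0',
#                     io_mechanism='io_uring')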
def bdev_xnvme_delete(client, name):
"""Delete a xNVMe bdev.
Args:
name: name of xNVMe bdev to delete
"""
params = {'name': name}
return client.call('bdev_xnvme_delete', params)
def bdev_nvme_set_options(client, action_on_timeout=None, timeout_us=None, timeout_admin_us=None,
keep_alive_timeout_ms=None, retry_count=None, arbitration_burst=None,
low_priority_weight=None, medium_priority_weight=None, high_priority_weight=None,
nvme_adminq_poll_period_us=None, nvme_ioq_poll_period_us=None, io_queue_requests=None,
delay_cmd_submit=None, transport_retry_count=None, bdev_retry_count=None,
transport_ack_timeout=None, ctrlr_loss_timeout_sec=None, reconnect_delay_sec=None,
fast_io_fail_timeout_sec=None, disable_auto_failback=None, generate_uuids=None):
"""Set options for the bdev nvme. This is startup command.
Args:
action_on_timeout: action to take on command time out. Valid values are: none, reset, abort (optional)
timeout_us: Timeout for each command, in microseconds. If 0, don't track timeouts (optional)
timeout_admin_us: Timeout for each admin command, in microseconds. If 0, treat same as io timeouts (optional)
keep_alive_timeout_ms: Keep alive timeout period in millisecond, default is 10s (optional)
retry_count: The number of attempts per I/O when an I/O fails (deprecated) (optional)
arbitration_burst: The value is expressed as a power of two (optional)
low_priority_weight: The number of commands that may be executed from the low priority queue at one time (optional)
medium_priority_weight: The number of commands that may be executed from the medium priority queue at one time (optional)
high_priority_weight: The number of commands that may be executed from the high priority queue at one time (optional)
nvme_adminq_poll_period_us: How often the admin queue is polled for asynchronous events in microseconds (optional)
nvme_ioq_poll_period_us: How often to poll I/O queues for completions in microseconds (optional)
io_queue_requests: The number of requests allocated for each NVMe I/O queue. Default: 512 (optional)
delay_cmd_submit: Enable delayed NVMe command submission to allow batching of multiple commands (optional)
transport_retry_count: The number of attempts per I/O in the transport layer when an I/O fails (optional)
bdev_retry_count: The number of attempts per I/O in the bdev layer when an I/O fails. -1 means infinite retries. (optional)
transport_ack_timeout: Time to wait ack until packet retransmission for RDMA or until closes connection for TCP.
Range 0-31 where 0 is driver-specific default value (optional)
ctrlr_loss_timeout_sec: Time to wait until ctrlr is reconnected before deleting ctrlr.
-1 means infinite reconnect retries. 0 means no reconnect retry.
If reconnect_delay_sec is zero, ctrlr_loss_timeout_sec has to be zero.
If reconnect_delay_sec is non-zero, ctrlr_loss_timeout_sec has to be -1 or not less than reconnect_delay_sec.
This can be overridden by bdev_nvme_attach_controller. (optional)
reconnect_delay_sec: Time to delay a reconnect retry.
If ctrlr_loss_timeout_sec is zero, reconnect_delay_sec has to be zero.
If ctrlr_loss_timeout_sec is -1, reconnect_delay_sec has to be non-zero.
If ctrlr_loss_timeout_sec is not -1 or zero, reconnect_delay_sec has to be non-zero and less than ctrlr_loss_timeout_sec.
This can be overridden by bdev_nvme_attach_controller. (optional)
fast_io_fail_timeout_sec: Time to wait until ctrlr is reconnected before failing I/O to ctrlr.
0 means no such timeout.
If fast_io_fail_timeout_sec is not zero, it has to be not less than reconnect_delay_sec and less than
ctrlr_loss_timeout_sec if ctrlr_loss_timeout_sec is not -1.
This can be overridden by bdev_nvme_attach_controller. (optional)
disable_auto_failback: Disable automatic failback. bdev_nvme_set_preferred_path can be used to do manual failback.
By default, immediately failback to the preferred I/O path if it is restored. (optional)
generate_uuids: Enable generation of unique identifiers for NVMe bdevs only if they do not provide UUID themselves.
These strings are based on device serial number and namespace ID and will always be the same for that device.
"""
params = {}
if action_on_timeout:
params['action_on_timeout'] = action_on_timeout
if timeout_us is not None:
params['timeout_us'] = timeout_us
if timeout_admin_us is not None:
params['timeout_admin_us'] = timeout_admin_us
if keep_alive_timeout_ms is not None:
params['keep_alive_timeout_ms'] = keep_alive_timeout_ms
if retry_count is not None:
print("WARNING: retry_count is deprecated, please use transport_retry_count.")
params['retry_count'] = retry_count
if arbitration_burst is not None:
params['arbitration_burst'] = arbitration_burst
if low_priority_weight is not None:
params['low_priority_weight'] = low_priority_weight
if medium_priority_weight is not None:
params['medium_priority_weight'] = medium_priority_weight
if high_priority_weight is not None:
params['high_priority_weight'] = high_priority_weight
if nvme_adminq_poll_period_us:
params['nvme_adminq_poll_period_us'] = nvme_adminq_poll_period_us
if nvme_ioq_poll_period_us is not None:
params['nvme_ioq_poll_period_us'] = nvme_ioq_poll_period_us
if io_queue_requests is not None:
params['io_queue_requests'] = io_queue_requests
if delay_cmd_submit is not None:
params['delay_cmd_submit'] = delay_cmd_submit
if transport_retry_count is not None:
params['transport_retry_count'] = transport_retry_count
if bdev_retry_count is not None:
params['bdev_retry_count'] = bdev_retry_count
if transport_ack_timeout is not None:
params['transport_ack_timeout'] = transport_ack_timeout
if ctrlr_loss_timeout_sec is not None:
params['ctrlr_loss_timeout_sec'] = ctrlr_loss_timeout_sec
if reconnect_delay_sec is not None:
params['reconnect_delay_sec'] = reconnect_delay_sec
if fast_io_fail_timeout_sec is not None:
params['fast_io_fail_timeout_sec'] = fast_io_fail_timeout_sec
if disable_auto_failback is not None:
params['disable_auto_failback'] = disable_auto_failback
if generate_uuids is not None:
params['generate_uuids'] = generate_uuids
return client.call('bdev_nvme_set_options', params)
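# Usage sketch (illustrative): typical tuning, enabling controller reset on
# command timeout with a 30-second per-command timeout. Values are placeholders;
# per the docstring above this is a startup command, so call it before
# attaching controllers.
#
#   bdev_nvme_set_options(client, action_on_timeout='reset',
#                         timeout_us=30 * 1000 * 1000,
#                         transport_retry_count=4, bdev_retry_count=3)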
def bdev_nvme_set_hotplug(client, enable, period_us=None):
"""Set options for the bdev nvme. This is startup command.
Args:
enable: True to enable hotplug, False to disable.
period_us: how often the hotplug is processed for insert and remove events. Set 0 to reset to default. (optional)
"""
params = {'enable': enable}
if period_us:
params['period_us'] = period_us
return client.call('bdev_nvme_set_hotplug', params)
def bdev_nvme_attach_controller(client, name, trtype, traddr, adrfam=None, trsvcid=None,
priority=None, subnqn=None, hostnqn=None, hostaddr=None,
hostsvcid=None, prchk_reftag=None, prchk_guard=None,
hdgst=None, ddgst=None, fabrics_timeout=None, multipath=None, num_io_queues=None,
ctrlr_loss_timeout_sec=None, reconnect_delay_sec=None,
fast_io_fail_timeout_sec=None, psk=None):
"""Construct block device for each NVMe namespace in the attached controller.
Args:
name: bdev name prefix; "n" + namespace ID will be appended to create unique names
trtype: transport type ("PCIe", "RDMA", "FC", "TCP")
traddr: transport address (PCI BDF or IP address)
adrfam: address family ("IPv4", "IPv6", "IB", or "FC")
trsvcid: transport service ID (port number for IP-based addresses)
priority: transport connection priority (Sock priority for TCP-based transports; optional)
subnqn: subsystem NQN to connect to (optional)
hostnqn: NQN to connect from (optional)
hostaddr: host transport address (IP address for IP-based transports, NULL for PCIe or FC; optional)
hostsvcid: host transport service ID (port number for IP-based transports, NULL for PCIe or FC; optional)
prchk_reftag: Enable checking of PI reference tag for I/O processing (optional)
prchk_guard: Enable checking of PI guard for I/O processing (optional)
hdgst: Enable TCP header digest (optional)
ddgst: Enable TCP data digest (optional)
fabrics_timeout: Fabrics connect timeout in us (optional)
multipath: The behavior when multiple paths are created ("disable", "failover", or "multipath"; failover if not specified)
num_io_queues: The number of IO queues to request during initialization. (optional)
ctrlr_loss_timeout_sec: Time to wait until ctrlr is reconnected before deleting ctrlr.
-1 means infinite reconnect retries. 0 means no reconnect retry.
If reconnect_delay_sec is zero, ctrlr_loss_timeout_sec has to be zero.
If reconnect_delay_sec is non-zero, ctrlr_loss_timeout_sec has to be -1 or not less than reconnect_delay_sec.
(optional)
reconnect_delay_sec: Time to delay a reconnect retry.
If ctrlr_loss_timeout_sec is zero, reconnect_delay_sec has to be zero.
If ctrlr_loss_timeout_sec is -1, reconnect_delay_sec has to be non-zero.
If ctrlr_loss_timeout_sec is not -1 or zero, reconnect_delay_sec has to be non-zero and less than ctrlr_loss_timeout_sec.
(optional)
fast_io_fail_timeout_sec: Time to wait until ctrlr is reconnected before failing I/O to ctrlr.
0 means no such timeout.
If fast_io_fail_timeout_sec is not zero, it has to be not less than reconnect_delay_sec and less than
ctrlr_loss_timeout_sec if ctrlr_loss_timeout_sec is not -1. (optional)
psk: Set PSK and enable TCP SSL socket implementation (optional)
Returns:
Names of created block devices.
"""
params = {'name': name,
'trtype': trtype,
'traddr': traddr}
if hostnqn:
params['hostnqn'] = hostnqn
if hostaddr:
params['hostaddr'] = hostaddr
if hostsvcid:
params['hostsvcid'] = hostsvcid
if adrfam:
params['adrfam'] = adrfam
if trsvcid:
params['trsvcid'] = trsvcid
if priority:
params['priority'] = priority
if subnqn:
params['subnqn'] = subnqn
if prchk_reftag:
params['prchk_reftag'] = prchk_reftag
if prchk_guard:
params['prchk_guard'] = prchk_guard
if hdgst:
params['hdgst'] = hdgst
if ddgst:
params['ddgst'] = ddgst
if fabrics_timeout:
params['fabrics_connect_timeout_us'] = fabrics_timeout
if multipath:
params['multipath'] = multipath
if num_io_queues:
params['num_io_queues'] = num_io_queues
if ctrlr_loss_timeout_sec is not None:
params['ctrlr_loss_timeout_sec'] = ctrlr_loss_timeout_sec
if reconnect_delay_sec is not None:
params['reconnect_delay_sec'] = reconnect_delay_sec
if fast_io_fail_timeout_sec is not None:
params['fast_io_fail_timeout_sec'] = fast_io_fail_timeout_sec
if psk:
params['psk'] = psk
return client.call('bdev_nvme_attach_controller', params)
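# Usage sketch (illustrative): attach a local PCIe controller and an NVMe-oF/TCP
# controller. The PCI address, IP address, service ID and NQN are placeholders.
#
#   bdev_nvme_attach_controller(client, name='Nvme0', trtype='PCIe',
#                               traddr='0000:00:04.0')
#   bdev_nvme_attach_controller(client, name='Nvme1', trtype='TCP',
#                               traddr='192.168.1.10', trsvcid='4420',
#                               adrfam='IPv4',
#                               subnqn='nqn.2016-06.io.spdk:cnode1')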
def bdev_nvme_detach_controller(client, name, trtype=None, traddr=None,
adrfam=None, trsvcid=None, subnqn=None,
hostaddr=None, hostsvcid=None):
"""Detach NVMe controller and delete any associated bdevs. Optionally,
If all of the transport ID options are specified, only remove that
transport path from the specified controller. If that is the only
available path for the controller, this will also result in the
controller being detached and the associated bdevs being deleted.
Args:
name: controller name
trtype: transport type ("PCIe", "RDMA")
traddr: transport address (PCI BDF or IP address)
adrfam: address family ("IPv4", "IPv6", "IB", or "FC")
trsvcid: transport service ID (port number for IP-based addresses)
subnqn: subsystem NQN to connect to (optional)
hostaddr: Host address (IP address)
hostsvcid: transport service ID on host side (port number)
"""
params = {'name': name}
if trtype:
params['trtype'] = trtype
if traddr:
params['traddr'] = traddr
if adrfam:
params['adrfam'] = adrfam
if trsvcid:
params['trsvcid'] = trsvcid
if subnqn:
params['subnqn'] = subnqn
if hostaddr:
params['hostaddr'] = hostaddr
if hostsvcid:
params['hostsvcid'] = hostsvcid
return client.call('bdev_nvme_detach_controller', params)
def bdev_nvme_reset_controller(client, name):
"""Reset NVMe controller.
Args:
name: controller name
"""
params = {'name': name}
return client.call('bdev_nvme_reset_controller', params)
def bdev_nvme_start_discovery(client, name, trtype, traddr, adrfam=None, trsvcid=None,
hostnqn=None, wait_for_attach=None, ctrlr_loss_timeout_sec=None,
reconnect_delay_sec=None, fast_io_fail_timeout_sec=None,
attach_timeout_ms=None):
"""Start discovery with the specified discovery subsystem
Args:
name: bdev name prefix; "n" + namespace ID will be appended to create unique names
trtype: transport type ("PCIe", "RDMA", "FC", "TCP")
traddr: transport address (PCI BDF or IP address)
adrfam: address family ("IPv4", "IPv6", "IB", or "FC")
trsvcid: transport service ID (port number for IP-based addresses)
hostnqn: NQN to connect from (optional)
wait_for_attach: Wait to complete RPC until all discovered NVM subsystems have attached (optional)
ctrlr_loss_timeout_sec: Time to wait until ctrlr is reconnected before deleting ctrlr.
-1 means infinite reconnect retries. 0 means no reconnect retry.
If reconnect_delay_sec is zero, ctrlr_loss_timeout_sec has to be zero.
If reconnect_delay_sec is non-zero, ctrlr_loss_timeout_sec has to be -1 or not less than reconnect_delay_sec.
(optional)
reconnect_delay_sec: Time to delay a reconnect retry.
If ctrlr_loss_timeout_sec is zero, reconnect_delay_sec has to be zero.
If ctrlr_loss_timeout_sec is -1, reconnect_delay_sec has to be non-zero.
If ctrlr_loss_timeout_sec is not -1 or zero, reconnect_delay_sec has to be non-zero and less than ctrlr_loss_timeout_sec.
(optional)
fast_io_fail_timeout_sec: Time to wait until ctrlr is reconnected before failing I/O to ctrlr.
0 means no such timeout.
If fast_io_fail_timeout_sec is not zero, it has to be not less than reconnect_delay_sec and less than
ctrlr_loss_timeout_sec if ctrlr_loss_timeout_sec is not -1. (optional)
attach_timeout_ms: Time to wait until the discovery and all discovered NVM subsystems are attached (optional)
"""
params = {'name': name,
'trtype': trtype,
'traddr': traddr}
if hostnqn:
params['hostnqn'] = hostnqn
if adrfam:
params['adrfam'] = adrfam
if trsvcid:
params['trsvcid'] = trsvcid
if wait_for_attach:
params['wait_for_attach'] = True
if attach_timeout_ms is not None:
params['attach_timeout_ms'] = attach_timeout_ms
if ctrlr_loss_timeout_sec is not None:
params['ctrlr_loss_timeout_sec'] = ctrlr_loss_timeout_sec
if reconnect_delay_sec is not None:
params['reconnect_delay_sec'] = reconnect_delay_sec
if fast_io_fail_timeout_sec is not None:
params['fast_io_fail_timeout_sec'] = fast_io_fail_timeout_sec
return client.call('bdev_nvme_start_discovery', params)
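# Usage sketch (illustrative): start discovery against an NVMe-oF/TCP discovery
# subsystem and wait until all discovered subsystems have attached. The address
# and service ID are placeholders.
#
#   bdev_nvme_start_discovery(client, name='Disc0', trtype='TCP',
#                             traddr='192.168.1.10', trsvcid='8009',
#                             adrfam='IPv4', wait_for_attach=True)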
def bdev_nvme_stop_discovery(client, name):
"""Stop a previously started discovery service
Args:
name: name of discovery service to stop
"""
params = {'name': name}
return client.call('bdev_nvme_stop_discovery', params)
def bdev_nvme_get_discovery_info(client):
"""Get information about the automatic discovery
"""
return client.call('bdev_nvme_get_discovery_info')
def bdev_nvme_get_io_paths(client, name):
"""Display all or the specified NVMe bdev's active I/O paths
Args:
name: Name of the NVMe bdev (optional)
Returns:
List of active I/O paths
"""
params = {}
if name:
params['name'] = name
return client.call('bdev_nvme_get_io_paths', params)
def bdev_nvme_set_preferred_path(client, name, cntlid):
"""Set the preferred I/O path for an NVMe bdev when in multipath mode
Args:
name: NVMe bdev name
cntlid: NVMe-oF controller ID
"""
params = {'name': name,
'cntlid': cntlid}
return client.call('bdev_nvme_set_preferred_path', params)
def bdev_nvme_set_multipath_policy(client, name, policy):
"""Set multipath policy of the NVMe bdev
Args:
name: NVMe bdev name
policy: Multipath policy (active_passive or active_active)
"""
params = {'name': name,
'policy': policy}
return client.call('bdev_nvme_set_multipath_policy', params)
def bdev_nvme_cuse_register(client, name):
"""Register CUSE devices on NVMe controller.
Args:
name: Name of the operating NVMe controller
"""
params = {'name': name}
return client.call('bdev_nvme_cuse_register', params)
def bdev_nvme_cuse_unregister(client, name):
"""Unregister CUSE devices on NVMe controller.
Args:
name: Name of the operating NVMe controller
"""
params = {'name': name}
return client.call('bdev_nvme_cuse_unregister', params)
def bdev_zone_block_create(client, name, base_bdev, zone_capacity, optimal_open_zones):
"""Creates a virtual zone device on top of existing non-zoned bdev.
Args:
name: Zone device name
base_bdev: Base Nvme bdev name
zone_capacity: Surfaced zone capacity in blocks
optimal_open_zones: Number of zones required to reach optimal write speed (optional, default: 1)
Returns:
Name of created block device.
"""
params = {'name': name,
'base_bdev': base_bdev,
'zone_capacity': zone_capacity,
'optimal_open_zones': optimal_open_zones}
return client.call('bdev_zone_block_create', params)
def bdev_zone_block_delete(client, name):
"""Remove block zone bdev from the system.
Args:
name: name of block zone bdev to delete
"""
params = {'name': name}
return client.call('bdev_zone_block_delete', params)
def bdev_rbd_register_cluster(client, name, user=None, config_param=None, config_file=None, key_file=None):
"""Create a Rados Cluster object of the Ceph RBD backend.
Args:
name: name of Rados Cluster
user: Ceph user name (optional)
config_param: map of config keys to values (optional)
config_file: file path of Ceph configuration file (optional)
key_file: file path of Ceph key file (optional)
Returns:
Name of registered Rados Cluster object.
"""
params = {'name': name}
if user is not None:
params['user_id'] = user
if config_param is not None:
params['config_param'] = config_param
if config_file is not None:
params['config_file'] = config_file
if key_file is not None:
params['key_file'] = key_file
return client.call('bdev_rbd_register_cluster', params)
def bdev_rbd_unregister_cluster(client, name):
"""Remove Rados cluster object from the system.
Args:
name: name of Rados cluster object to unregister
"""
params = {'name': name}
return client.call('bdev_rbd_unregister_cluster', params)
def bdev_rbd_get_clusters_info(client, name):
"""Get the cluster(s) info
Args:
name: name of Rados cluster object to query (optional; if omitted, query all clusters)
Returns:
List of registered Rados cluster information objects.
"""
params = {}
if name:
params['name'] = name
return client.call('bdev_rbd_get_clusters_info', params)
def bdev_rbd_create(client, pool_name, rbd_name, block_size, name=None, user=None, config=None, cluster_name=None, uuid=None):
"""Create a Ceph RBD block device.
Args:
pool_name: Ceph RBD pool name
rbd_name: Ceph RBD image name
block_size: block size of RBD volume
name: name of block device (optional)
user: Ceph user name (optional)
config: map of config keys to values (optional)
cluster_name: Name to identify Rados cluster (optional)
uuid: UUID of block device (optional)
Returns:
Name of created block device.
"""
params = {
'pool_name': pool_name,
'rbd_name': rbd_name,
'block_size': block_size,
}
if name:
params['name'] = name
if user is not None:
params['user_id'] = user
if config is not None:
params['config'] = config
if cluster_name is not None:
params['cluster_name'] = cluster_name
else:
print("WARNING:bdev_rbd_create should be used with specifying -c to have a cluster name after bdev_rbd_register_cluster.")
if uuid is not None:
params['uuid'] = uuid
return client.call('bdev_rbd_create', params)
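# Illustrative sketch: the recommended flow is to register a Rados cluster first and then
# reference it by cluster_name when creating RBD bdevs, which avoids the warning above.
# The cluster name, config file path, pool and image names are placeholders; `client` is
# assumed to be a connected JSON-RPC client.
def _example_rbd_with_cluster(client):
    cluster = bdev_rbd_register_cluster(client, name='ceph_cluster0',
                                        config_file='/etc/ceph/ceph.conf')
    # Create a 4 KiB-block RBD bdev backed by image 'rbd_image0' in pool 'rbd'.
    return bdev_rbd_create(client, pool_name='rbd', rbd_name='rbd_image0',
                           block_size=4096, name='Rbd0', cluster_name=cluster)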
def bdev_rbd_delete(client, name):
"""Remove rbd bdev from the system.
Args:
name: name of rbd bdev to delete
"""
params = {'name': name}
return client.call('bdev_rbd_delete', params)
def bdev_rbd_resize(client, name, new_size):
"""Resize rbd bdev in the system.
Args:
name: name of rbd bdev to resize
new_size: new size of the bdev, in MiB
"""
params = {
'name': name,
'new_size': new_size,
}
return client.call('bdev_rbd_resize', params)
def bdev_error_create(client, base_name):
"""Construct an error injection block device.
Args:
base_name: base bdev name
"""
params = {'base_name': base_name}
return client.call('bdev_error_create', params)
def bdev_delay_create(client, base_bdev_name, name, avg_read_latency, p99_read_latency, avg_write_latency, p99_write_latency):
"""Construct a delay block device.
Args:
base_bdev_name: name of the existing bdev
name: name of block device
avg_read_latency: complete 99% of read ops with this delay
p99_read_latency: complete 1% of read ops with this delay
avg_write_latency: complete 99% of write ops with this delay
p99_write_latency: complete 1% of write ops with this delay
Returns:
Name of created block device.
"""
params = {
'base_bdev_name': base_bdev_name,
'name': name,
'avg_read_latency': avg_read_latency,
'p99_read_latency': p99_read_latency,
'avg_write_latency': avg_write_latency,
'p99_write_latency': p99_write_latency,
}
return client.call('bdev_delay_create', params)
def bdev_delay_delete(client, name):
"""Remove delay bdev from the system.
Args:
name: name of delay bdev to delete
"""
params = {'name': name}
return client.call('bdev_delay_delete', params)
def bdev_delay_update_latency(client, delay_bdev_name, latency_type, latency_us):
"""Update the latency value for a delay block device
Args:
delay_bdev_name: name of the delay bdev
latency_type: one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.
latency_us: new latency value, in microseconds.
Returns:
True if successful, or a specific error otherwise.
"""
params = {
'delay_bdev_name': delay_bdev_name,
'latency_type': latency_type,
'latency_us': latency_us,
}
return client.call('bdev_delay_update_latency', params)
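# Illustrative sketch: create a delay bdev on top of an existing bdev, then raise its
# p99 read latency at runtime. Latencies are in microseconds; the bdev names are
# placeholders and `client` is assumed to be a connected JSON-RPC client.
def _example_delay_bdev(client):
    bdev_delay_create(client, base_bdev_name='Malloc0', name='Delay0',
                      avg_read_latency=100, p99_read_latency=1000,
                      avg_write_latency=200, p99_write_latency=2000)
    # Bump the tail read latency to 5 ms without recreating the bdev.
    return bdev_delay_update_latency(client, delay_bdev_name='Delay0',
                                     latency_type='p99_read', latency_us=5000)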
def bdev_error_delete(client, name):
"""Remove error bdev from the system.
Args:
name: name of error bdev to delete
"""
params = {'name': name}
return client.call('bdev_error_delete', params)
def bdev_iscsi_set_options(client, timeout_sec):
"""Set options for the bdev iscsi.
Args:
timeout_sec: Timeout for a command, in seconds; if 0, do not track timeouts
"""
params = {}
if timeout_sec is not None:
params['timeout_sec'] = timeout_sec
return client.call('bdev_iscsi_set_options', params)
def bdev_iscsi_create(client, name, url, initiator_iqn):
"""Construct an iSCSI block device.
Args:
name: name of block device
url: iSCSI URL
initiator_iqn: IQN name to be used by initiator
Returns:
Name of created block device.
"""
params = {
'name': name,
'url': url,
'initiator_iqn': initiator_iqn,
}
return client.call('bdev_iscsi_create', params)
def bdev_iscsi_delete(client, name):
"""Remove iSCSI bdev from the system.
Args:
name: name of iSCSI bdev to delete
"""
params = {'name': name}
return client.call('bdev_iscsi_delete', params)
def bdev_pmem_create(client, pmem_file, name):
"""Construct a libpmemblk block device.
Args:
pmem_file: path to pmemblk pool file
name: name of block device
Returns:
Name of created block device.
"""
params = {
'pmem_file': pmem_file,
'name': name
}
return client.call('bdev_pmem_create', params)
def bdev_pmem_delete(client, name):
"""Remove pmem bdev from the system.
Args:
name: name of pmem bdev to delete
"""
params = {'name': name}
return client.call('bdev_pmem_delete', params)
def bdev_passthru_create(client, base_bdev_name, name):
"""Construct a pass-through block device.
Args:
base_bdev_name: name of the existing bdev
name: name of block device
Returns:
Name of created block device.
"""
params = {
'base_bdev_name': base_bdev_name,
'name': name,
}
return client.call('bdev_passthru_create', params)
def bdev_passthru_delete(client, name):
"""Remove pass through bdev from the system.
Args:
name: name of pass through bdev to delete
"""
params = {'name': name}
return client.call('bdev_passthru_delete', params)
def bdev_opal_create(client, nvme_ctrlr_name, nsid, locking_range_id, range_start, range_length, password):
"""Create opal virtual block devices from a base nvme bdev.
Args:
nvme_ctrlr_name: name of the nvme ctrlr
nsid: namespace ID of nvme ctrlr
locking_range_id: locking range ID corresponding to this virtual bdev
range_start: start address of this locking range
range_length: length of this locking range
password: admin password of base nvme bdev
Returns:
Name of the newly created block device.
"""
params = {
'nvme_ctrlr_name': nvme_ctrlr_name,
'nsid': nsid,
'locking_range_id': locking_range_id,
'range_start': range_start,
'range_length': range_length,
'password': password,
}
return client.call('bdev_opal_create', params)
def bdev_opal_get_info(client, bdev_name, password):
"""Get opal locking range info.
Args:
bdev_name: name of opal vbdev to get info
password: admin password
Returns:
Locking range info.
"""
params = {
'bdev_name': bdev_name,
'password': password,
}
return client.call('bdev_opal_get_info', params)
def bdev_opal_delete(client, bdev_name, password):
"""Delete opal virtual bdev from the system.
Args:
bdev_name: name of opal vbdev to delete
password: admin password of base nvme bdev
"""
params = {
'bdev_name': bdev_name,
'password': password,
}
return client.call('bdev_opal_delete', params)
def bdev_opal_new_user(client, bdev_name, admin_password, user_id, user_password):
"""Add a user to opal bdev who can set lock state for this bdev.
Args:
bdev_name: name of opal vbdev
admin_password: admin password
user_id: ID of the user who will be added to this opal bdev
user_password: password set for this user
"""
params = {
'bdev_name': bdev_name,
'admin_password': admin_password,
'user_id': user_id,
'user_password': user_password,
}
return client.call('bdev_opal_new_user', params)
def bdev_opal_set_lock_state(client, bdev_name, user_id, password, lock_state):
"""set lock state for an opal bdev.
Args:
bdev_name: name of opal vbdev
user_id: ID of the user who will set lock state
password: password of the user
lock_state: lock state to set
"""
params = {
'bdev_name': bdev_name,
'user_id': user_id,
'password': password,
'lock_state': lock_state,
}
return client.call('bdev_opal_set_lock_state', params)
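# Illustrative sketch of the Opal workflow exposed above: carve a locking range out as a
# vbdev, add a user, and let that user set the lock state. The ctrlr name, range, user ID,
# passwords and the 'READWRITE' lock state are placeholders; `client` is assumed to be a
# connected JSON-RPC client.
def _example_opal_workflow(client):
    opal_bdev = bdev_opal_create(client, nvme_ctrlr_name='nvme0', nsid=1,
                                 locking_range_id=1, range_start=0, range_length=4096,
                                 password='admin_secret')
    bdev_opal_new_user(client, bdev_name=opal_bdev, admin_password='admin_secret',
                       user_id=1, user_password='user_secret')
    return bdev_opal_set_lock_state(client, bdev_name=opal_bdev, user_id=1,
                                    password='user_secret', lock_state='READWRITE')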
def bdev_split_create(client, base_bdev, split_count, split_size_mb=None):
"""Create split block devices from a base bdev.
Args:
base_bdev: name of bdev to split
split_count: number of split bdevs to create
split_size_mb: size of each split volume in MiB (optional)
Returns:
List of created block devices.
"""
params = {
'base_bdev': base_bdev,
'split_count': split_count,
}
if split_size_mb:
params['split_size_mb'] = split_size_mb
return client.call('bdev_split_create', params)
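# Illustrative sketch: split an existing bdev into four equal parts, leaving the split
# size for the target to compute. The base bdev name is a placeholder; `client` is
# assumed to be a connected JSON-RPC client. bdev_split_delete on the same base bdev
# undoes the split.
def _example_split(client):
    return bdev_split_create(client, base_bdev='Nvme0n1', split_count=4)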
def bdev_split_delete(client, base_bdev):
"""Delete split block devices.
Args:
base_bdev: name of previously split bdev
"""
params = {
'base_bdev': base_bdev,
}
return client.call('bdev_split_delete', params)
def bdev_ftl_create(client, name, base_bdev, **kwargs):
"""Construct FTL bdev
Args:
name: name of the bdev
base_bdev: name of the base bdev
kwargs: optional parameters
"""
params = {'name': name,
'base_bdev': base_bdev}
for key, value in kwargs.items():
if value is not None:
params[key] = value
return client.call('bdev_ftl_create', params)
def bdev_ftl_load(client, name, base_bdev, **kwargs):
"""Load FTL bdev
Args:
name: name of the bdev
base_bdev: name of the base bdev
kwargs: optional parameters
"""
params = {'name': name,
'base_bdev': base_bdev}
for key, value in kwargs.items():
if value is not None:
params[key] = value
return client.call('bdev_ftl_load', params)
def bdev_ftl_unload(client, name, fast_shutdown):
"""Unload FTL bdev
Args:
name: name of the bdev
fast_shutdown: enable fast shutdown
"""
params = {'name': name,
'fast_shutdown': fast_shutdown}
return client.call('bdev_ftl_unload', params)
def bdev_ftl_delete(client, name, fast_shutdown):
"""Delete FTL bdev
Args:
name: name of the bdev
fast_shutdown: enable fast shutdown
"""
params = {'name': name,
'fast_shutdown': fast_shutdown}
return client.call('bdev_ftl_delete', params)
def bdev_ftl_unmap(client, name, lba, num_blocks):
"""FTL unmap
Args:
name: name of the bdev
lba: starting lba to be unmapped
num_blocks: number of blocks to unmap
"""
params = {'name': name,
'lba': lba,
'num_blocks': num_blocks}
return client.call('bdev_ftl_unmap', params)
def bdev_ftl_get_stats(client, name):
"""get FTL stats
Args:
name: name of the bdev
"""
params = {'name': name}
return client.call('bdev_ftl_get_stats', params)
def bdev_get_bdevs(client, name=None, timeout=None):
"""Get information about block devices.
Args:
name: bdev name to query (optional; if omitted, query all bdevs)
timeout: time in ms to wait for the bdev with specified name to appear
Returns:
List of bdev information objects.
"""
params = {}
if name:
params['name'] = name
if timeout:
params['timeout'] = timeout
return client.call('bdev_get_bdevs', params)
def bdev_get_iostat(client, name=None, per_channel=None):
"""Get I/O statistics for block devices.
Args:
name: bdev name to query (optional; if omitted, query all bdevs)
per_channel: display per channel IO stats for specified bdev
Returns:
I/O statistics for the requested block devices.
"""
params = {}
if name:
params['name'] = name
if per_channel:
params['per_channel'] = per_channel
return client.call('bdev_get_iostat', params)
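# Illustrative sketch: query aggregate I/O statistics for one bdev and then the same
# statistics broken down per channel (i.e. per SPDK thread that owns a channel on that
# bdev). The bdev name is a placeholder; `client` is assumed to be a connected
# JSON-RPC client.
def _example_iostat(client):
    totals = bdev_get_iostat(client, name='Malloc0')
    per_channel = bdev_get_iostat(client, name='Malloc0', per_channel=True)
    return totals, per_channel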
def bdev_reset_iostat(client, name=None):
"""Reset I/O statistics for block devices.
Args:
name: bdev name to reset (optional; if omitted, reset all bdevs)
"""
params = {}
if name:
params['name'] = name
return client.call('bdev_reset_iostat', params)
def bdev_enable_histogram(client, name, enable):
"""Control whether histogram is enabled for specified bdev.
Args:
name: name of bdev
enable: True to enable histogram collection, False to disable it
"""
params = {'name': name, "enable": enable}
return client.call('bdev_enable_histogram', params)
def bdev_get_histogram(client, name):
"""Get histogram for specified bdev.
Args:
name: name of bdev
"""
params = {'name': name}
return client.call('bdev_get_histogram', params)
def bdev_error_inject_error(client, name, io_type, error_type, num,
corrupt_offset, corrupt_value):
"""Inject an error via an error bdev.
Args:
name: name of error bdev
io_type: one of "clear", "read", "write", "unmap", "flush", or "all"
error_type: one of "failure", "pending", or "corrupt_data"
num: number of commands to fail
corrupt_offset: offset in bytes to xor with corrupt_value
corrupt_value: value for xor (1-255, 0 is invalid)
"""
params = {
'name': name,
'io_type': io_type,
'error_type': error_type,
}
if num:
params['num'] = num
if corrupt_offset:
params['corrupt_offset'] = corrupt_offset
if corrupt_value:
params['corrupt_value'] = corrupt_value
return client.call('bdev_error_inject_error', params)
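# Illustrative sketch: pair bdev_error_create with bdev_error_inject_error to fail a few
# read commands on the error bdev stacked on top of a base bdev. The base bdev name is a
# placeholder, and the error bdev is assumed here to be exposed as 'EE_' + base name
# (SPDK's usual naming); `client` is assumed to be a connected JSON-RPC client.
def _example_error_injection(client):
    bdev_error_create(client, base_name='Malloc0')
    # Fail the next 5 reads; corrupt_offset/corrupt_value only apply to error_type
    # 'corrupt_data' and are left at 0 so they are not sent.
    return bdev_error_inject_error(client, name='EE_Malloc0', io_type='read',
                                   error_type='failure', num=5,
                                   corrupt_offset=0, corrupt_value=0)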
def bdev_set_qd_sampling_period(client, name, period):
"""Enable queue depth tracking on a specified bdev.
Args:
name: name of a bdev on which to track queue depth.
period: period (in microseconds) at which to update the queue depth reading. If set to 0, polling will be disabled.
"""
params = {}
params['name'] = name
params['period'] = period
return client.call('bdev_set_qd_sampling_period', params)
def bdev_set_qos_limit(
client,
name,
rw_ios_per_sec=None,
rw_mbytes_per_sec=None,
r_mbytes_per_sec=None,
w_mbytes_per_sec=None):
"""Set QoS rate limit on a block device.
Args:
name: name of block device
rw_ios_per_sec: R/W IOs per second limit (>=1000, example: 20000). 0 means unlimited.
rw_mbytes_per_sec: R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.
r_mbytes_per_sec: Read megabytes per second limit (>=10, example: 100). 0 means unlimited.
w_mbytes_per_sec: Write megabytes per second limit (>=10, example: 100). 0 means unlimited.
"""
params = {}
params['name'] = name
if rw_ios_per_sec is not None:
params['rw_ios_per_sec'] = rw_ios_per_sec
if rw_mbytes_per_sec is not None:
params['rw_mbytes_per_sec'] = rw_mbytes_per_sec
if r_mbytes_per_sec is not None:
params['r_mbytes_per_sec'] = r_mbytes_per_sec
if w_mbytes_per_sec is not None:
params['w_mbytes_per_sec'] = w_mbytes_per_sec
return client.call('bdev_set_qos_limit', params)
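# Illustrative sketch: cap a bdev at 20k read/write IOPS and 100 MiB/s of combined
# throughput; passing 0 for a limit removes it. The bdev name is a placeholder and
# `client` is assumed to be a connected JSON-RPC client.
def _example_qos(client):
    return bdev_set_qos_limit(client, name='Malloc0',
                              rw_ios_per_sec=20000, rw_mbytes_per_sec=100)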
def bdev_nvme_apply_firmware(client, bdev_name, filename):
"""Download and commit firmware to NVMe device.
Args:
bdev_name: name of NVMe block device
filename: filename of the firmware to download
"""
params = {
'filename': filename,
'bdev_name': bdev_name,
}
return client.call('bdev_nvme_apply_firmware', params)
def bdev_nvme_get_transport_statistics(client):
"""Get bdev_nvme poll group transport statistics"""
return client.call('bdev_nvme_get_transport_statistics')
def bdev_nvme_get_controller_health_info(client, name):
"""Display health log of the required NVMe bdev controller.
Args:
name: name of the required NVMe bdev controller
Returns:
Health log for the requested NVMe bdev controller.
"""
params = {}
params['name'] = name
return client.call('bdev_nvme_get_controller_health_info', params)
def bdev_daos_create(client, num_blocks, block_size, pool, cont, name, oclass=None, uuid=None):
"""Construct DAOS block device.
Args:
num_blocks: size of block device in blocks
block_size: block size of device; must be a power of 2 and at least 512
name: name of block device (also the name of the backend file on DAOS DFS)
pool: UUID of DAOS pool
cont: UUID of DAOS container
uuid: UUID of block device (optional)
oclass: DAOS object class (optional)
Returns:
Name of created block device.
"""
params = {'num_blocks': num_blocks, 'block_size': block_size, 'pool': pool, 'cont': cont, 'name': name}
if uuid:
params['uuid'] = uuid
if oclass:
params['oclass'] = oclass
return client.call('bdev_daos_create', params)
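# Illustrative sketch: create a 4 GiB DAOS-backed bdev with a 4 KiB block size
# (num_blocks * block_size gives the device size). The pool and container identifiers
# and the bdev name are placeholders; `client` is assumed to be a connected
# JSON-RPC client.
def _example_daos(client):
    return bdev_daos_create(client, num_blocks=1048576, block_size=4096,
                            pool='pool_label', cont='cont_label', name='daosdev0')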
def bdev_daos_delete(client, name):
"""Delete DAOS block device.
Args:
name: name of DAOS bdev to delete
"""
params = {'name': name}
return client.call('bdev_daos_delete', params)
def bdev_daos_resize(client, name, new_size):
"""Resize DAOS bdev in the system.
Args:
name: name of DAOS bdev to resize
new_size: new size of the bdev, in MiB
"""
params = {
'name': name,
'new_size': new_size,
}
return client.call('bdev_daos_resize', params)