Spdk/CONFIG

# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2015 Intel Corporation.
# All rights reserved.
# Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2022 Dell Inc, or its subsidiaries.
#
# configure options: __CONFIGURE_OPTIONS__
# Installation prefix
CONFIG_PREFIX="/usr/local"
# Target architecture
CONFIG_ARCH=native
# Destination directory for the libraries
CONFIG_LIBDIR=
# Prefix for cross compilation
CONFIG_CROSS_PREFIX=
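# Illustrative sketch of how the installation and cross-compilation options
# above are normally populated by the configure script (the aarch64 toolchain
# prefix below is only a hypothetical example):
#   ./configure --prefix=/usr/local --cross-prefix=aarch64-linux-gnu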
# Build with debug logging. Turn off for performance testing and normal usage
CONFIG_DEBUG=n
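# Debug builds are typically requested at configure time rather than by
# editing this file directly, e.g.:
#   ./configure --enable-debug    # results in CONFIG_DEBUG=y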
# Treat warnings as errors (fail the build on any warning).
CONFIG_WERROR=n
# Build with link-time optimization.
CONFIG_LTO=n
# Generate profile guided optimization data.
CONFIG_PGO_CAPTURE=n
# Use profile guided optimization data.
CONFIG_PGO_USE=n
# Build with code coverage instrumentation.
CONFIG_COVERAGE=n
# Build with Address Sanitizer enabled
CONFIG_ASAN=n
# Build with Undefined Behavior Sanitizer enabled
CONFIG_UBSAN=n
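# Sanitizer builds are usually requested at configure time (compiler support
# for ASan/UBSan is assumed), e.g.:
#   ./configure --enable-asan --enable-ubsan    # sets CONFIG_ASAN=y, CONFIG_UBSAN=y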
# Build with LLVM fuzzing enabled
CONFIG_FUZZER=n
CONFIG_FUZZER_LIB=
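# Example from the libFuzzer-based nvmf fuzzing support: clang must be used as
# the compiler, and the fuzzer_no_main runtime is passed explicitly (its path
# depends on your clang version and architecture):
#   CC=clang-12 CXX=clang++-12 ./configure --with-fuzzer=\
#     /usr/lib/llvm-12/lib/clang/12.0.0/lib/linux/libclang_rt.fuzzer_no_main-x86_64.a
#   make
#   test/nvmf/target/llvm_nvme_fuzz.sh --time=60 --fuzzer=0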
# Build with Thread Sanitizer enabled
CONFIG_TSAN=n
# Build functional tests
CONFIG_TESTS=y
# Build unit tests
CONFIG_UNIT_TESTS=y
# Build examples
CONFIG_EXAMPLES=y
# Build apps
CONFIG_APPS=y
# Build with Control-flow Enforcement Technology (CET)
CONFIG_CET=n
# Directory that contains the desired SPDK environment library.
# By default, this is implemented using DPDK.
CONFIG_ENV=
# This directory should contain 'include' and 'lib' directories for your DPDK
# installation.
CONFIG_DPDK_DIR=
# Automatically set via pkg-config when bare --with-dpdk is set
CONFIG_DPDK_LIB_DIR=
CONFIG_DPDK_INC_DIR=
CONFIG_DPDK_PKG_CONFIG=n
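# Sketch of how the DPDK options above are typically filled in:
#   ./configure --with-dpdk                      # bare flag: locate DPDK via pkg-config
#   ./configure --with-dpdk=/path/to/dpdk/build  # explicit build dir sets CONFIG_DPDK_DIR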
# This directory should contain 'include' and 'lib' directories for WPDK.
CONFIG_WPDK_DIR=
# Build SPDK FIO plugin. Requires CONFIG_FIO_SOURCE_DIR set to a valid
# fio source code directory.
CONFIG_FIO_PLUGIN=n
# This directory should contain the source code directory for fio
# which is required for building the SPDK FIO plugin.
CONFIG_FIO_SOURCE_DIR=/usr/src/fio
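# Example, assuming the fio sources are checked out at /usr/src/fio:
#   ./configure --with-fio=/usr/src/fio   # sets CONFIG_FIO_PLUGIN=y and the source dir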
# Enable RDMA support for the NVMf target.
# Requires ibverbs development libraries.
CONFIG_RDMA=n
CONFIG_RDMA_SEND_WITH_INVAL=n
CONFIG_RDMA_SET_ACK_TIMEOUT=n
CONFIG_RDMA_SET_TOS=n
CONFIG_RDMA_PROV=verbs
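# RDMA support is normally switched on at configure time; a sketch, assuming
# ibverbs development libraries are installed:
#   ./configure --with-rdma            # default verbs provider
#   ./configure --with-rdma=mlx5_dv    # mlx5 direct-verbs provider, where supported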
# Enable NVMe Character Devices.
CONFIG_NVME_CUSE=n
# Enable FC support for the NVMf target.
# Requires FC low level driver (from FC vendor)
CONFIG_FC=n
CONFIG_FC_PATH=
# Build Ceph RBD support in bdev modules
# Requires librbd development libraries
CONFIG_RBD=n
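# Sketch, assuming librbd/librados development packages are installed:
#   ./configure --with-rbd    # sets CONFIG_RBD=y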
# Build DAOS support in bdev modules
# Requires daos development libraries
CONFIG_DAOS=n
CONFIG_DAOS_DIR=
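# Example from the DAOS bdev introduction (requires the daos-devel package,
# DAOS v2.x); the pool and container labels are placeholders:
#   ./configure --with-daos
#   ./scripts/rpc.py bdev_daos_create daosdev0 <pool-label> <cont-label> 1048576 4096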
# Build UBLK support
CONFIG_UBLK=n
# Build vhost library.
CONFIG_VHOST=y
# Build vhost initiator (Virtio) driver.
CONFIG_VIRTIO=y
# Build custom vfio-user transport for NVMf target and NVMe initiator.
CONFIG_VFIO_USER=n
CONFIG_VFIO_USER_DIR=
# Build with xNVMe
CONFIG_XNVME=n
# Enable the dependencies for building the DPDK accel compress module
CONFIG_DPDK_COMPRESSDEV=n
# Enable the dependencies for building the compress vbdev, includes the reduce library
CONFIG_VBDEV_COMPRESS=n
# Enable mlx5_pci dpdk compress PMD, enabled automatically if CONFIG_VBDEV_COMPRESS=y and libmlx5 exists
CONFIG_VBDEV_COMPRESS_MLX5=n
# Enable mlx5_pci dpdk crypto PMD, enabled automatically if CONFIG_CRYPTO=y and libmlx5 exists
CONFIG_CRYPTO_MLX5=n
# Build the iSCSI initiator bdev module.
# Requires libiscsi development libraries.
CONFIG_ISCSI_INITIATOR=n
# Enable the dependencies for building the crypto vbdev
CONFIG_CRYPTO=n
# Build spdk shared libraries in addition to the static ones.
CONFIG_SHARED=n
# Build with VTune support.
CONFIG_VTUNE=n
CONFIG_VTUNE_DIR=
# Build Intel IPSEC_MB library
CONFIG_IPSEC_MB=n
# Enable OCF module
CONFIG_OCF=n
CONFIG_OCF_PATH=
CONFIG_CUSTOMOCF=n
# Build ISA-L library
CONFIG_ISAL=y
# Build ISA-L-crypto library
CONFIG_ISAL_CRYPTO=y
# Build with IO_URING support
CONFIG_URING=n
# Build IO_URING bdev with ZNS support
CONFIG_URING_ZNS=n
# Path to custom built IO_URING library
CONFIG_URING_PATH=
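# Sketch of enabling io_uring support at configure time:
#   ./configure --with-uring                    # sets CONFIG_URING=y
#   ./configure --with-uring=/path/to/liburing  # use a custom-built liburing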
# Path to custom built OPENSSL library
CONFIG_OPENSSL_PATH=
# Build with FUSE support
CONFIG_FUSE=n
# Build with RAID5f support
CONFIG_RAID5F=n
# Build with IDXD support
# In this mode, SPDK fully controls the DSA device.
CONFIG_IDXD=n
# Build with USDT support
CONFIG_USDT=n
# Build with IDXD kernel support.
# In this mode, SPDK shares the DSA device with the kernel.
CONFIG_IDXD_KERNEL=n
# arc4random is available in stdlib.h
CONFIG_HAVE_ARC4RANDOM=n
# uuid_generate_sha1 is available in uuid/uuid.h
CONFIG_HAVE_UUID_GENERATE_SHA1=n
# Is DPDK using libbsd?
CONFIG_HAVE_LIBBSD=n
# Is DPDK using libarchive?
CONFIG_HAVE_LIBARCHIVE=n
# Path to IPSEC_MB used by DPDK
CONFIG_IPSEC_MB_DIR=
# Generate Storage Management Agent's protobuf interface
CONFIG_SMA=n
# Build with Avahi support
CONFIG_AVAHI=n
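# CONFIG_AVAHI backs the mDNS-based auto-discovery of NVMe-oF discovery
# controllers (TP-8009). A sketch of the RPC flow this option enables
# (the Avahi daemon must be running on the host):
#   scripts/rpc.py bdev_nvme_start_mdns_discovery -b cdc_auto -s _nvme-disc._tcp
#   scripts/rpc.py bdev_nvme_get_mdns_discovery_info
#   scripts/rpc.py bdev_nvme_stop_mdns_discovery -b cdc_auto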