{
	global:

	# public functions from nvme.h
	spdk_nvme_transport_register;
	spdk_nvme_transport_available;
	spdk_nvme_transport_available_by_name;

nvme_rdma: Support SRQ for I/O qpairs
Support SRQ in the RDMA transport of the NVMe-oF initiator.
Add a new spdk_nvme_transport_opts structure and add rdma_srq_size to it.
For users of the NVMe driver, provide two public APIs,
spdk_nvme_transport_get_opts() and spdk_nvme_transport_set_opts().
Within the NVMe driver, the instance of spdk_nvme_transport_opts,
g_spdk_nvme_transport_opts, is accessible throughout.
Due to an issue where async event handling caused conflicts between
the initiator and target, the NVMe-oF RDMA initiator does not handle
the LAST_WQE_REACHED event. Hence, it may get a WC for an already
destroyed QP. To clarify this, add a comment in the source code.
The following is the result of a small performance evaluation using
the SPDK NVMe perf tool. Even for queue_depth=1, the overhead was less
than 1%. Eventually, we may be able to enable SRQ by default for the
NVMe-oF initiator.
1.1 randwrite, qd=1, srq=enabled
./build/examples/perf -q 1 -s 1024 -w randwrite -t 30 -c 0XF -o 4096 -r
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 162411.97 634.42 6.14 5.42 284.07
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 163095.87 637.09 6.12 5.41 423.95
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 164725.30 643.46 6.06 5.32 165.60
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 162548.57 634.96 6.14 5.39 227.24
========================================================
Total : 652781.70 2549.93 6.12
1.2 randwrite, qd=1, srq=disabled
./build/examples/perf -q 1 -s 1024 -w randwrite -t 30 -c 0XF -o 4096 -r
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 163398.03 638.27 6.11 5.33 240.76
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 164632.47 643.10 6.06 5.29 125.22
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 164694.40 643.34 6.06 5.31 408.43
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 164007.13 640.65 6.08 5.33 170.10
========================================================
Total : 656732.03 2565.36 6.08 5.29 408.43
2.1 randread, qd=1, srq=enabled
./build/examples/perf -q 1 -s 1024 -w randread -t 30 -c 0xF -o 4096 -r '
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 153514.40 599.67 6.50 5.97 277.22
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 153567.57 599.87 6.50 5.95 408.06
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 153590.33 599.96 6.50 5.88 134.74
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 153357.40 599.05 6.51 5.97 229.03
========================================================
Total : 614029.70 2398.55 6.50 5.88 408.06
2.2 randread, qd=1, srq=disabled
./build/examples/perf -q 1 -s 1024 -w randread -t 30 -c 0XF -o 4096 -r '
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 154452.40 603.33 6.46 5.94 233.15
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 154711.67 604.34 6.45 5.91 25.55
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 154717.70 604.37 6.45 5.88 130.92
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 154713.77 604.35 6.45 5.91 128.19
========================================================
Total : 618595.53 2416.39 6.45 5.88 233.15
3.1 randwrite, qd=32, srq=enabled
./build/examples/perf -q 32 -s 1024 -w randwrite -t 30 -c 0XF -o 4096 -r 'trtype:RDMA adrfam:IPv4 traddr:1.1.18.1 trsvcid:4420'
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 672608.17 2627.38 47.56 11.33 326.96
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 672386.20 2626.51 47.58 11.03 221.88
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 673343.70 2630.25 47.51 9.11 387.54
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 672799.10 2628.12 47.55 10.48 552.80
========================================================
Total : 2691137.17 10512.25 47.55 9.11 552.80
3.2 randwrite, qd=32, srq=disabled
./build/examples/perf -q 32 -s 1024 -w randwrite -t 30 -c 0XF -o 4096 -r 'trtype:RDMA adrfam:IPv4 traddr:1.1.18.1 trsvcid:4420'
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 672647.53 2627.53 47.56 11.13 389.95
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 672756.50 2627.96 47.55 9.53 394.83
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 672464.63 2626.81 47.57 9.48 528.07
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 673250.73 2629.89 47.52 9.43 389.83
========================================================
Total : 2691119.40 10512.19 47.55 9.43 528.07
4.1 randread, qd=32, srq=enabled
./build/examples/perf -q 32 -s 1024 -w randread -t 30 -c 0xF -o 4096 -r
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 677286.30 2645.65 47.23 12.29 335.90
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 677554.97 2646.70 47.22 20.39 196.21
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 677086.07 2644.87 47.25 19.17 386.26
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 677654.93 2647.09 47.21 18.92 181.05
========================================================
Total : 2709582.27 10584.31 47.23 12.29 386.26
4.2 randread, qd=32, srq=disabled
./build/examples/perf -q 32 -s 1024 -w randread -t 30 -c 0XF -o 4096 -r
========================================================
Latency(us)
Device Information : IOPS MiB/s Average min max
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 0: 677432.60 2646.22 47.22 13.05 435.91
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 1: 677450.43 2646.29 47.22 16.26 178.60
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 2: 677647.10 2647.06 47.21 17.82 177.83
RDMA (addr:1.1.18.1 subnqn:nqn.2016-06.io.spdk:cnode1) NSID 1 from core 3: 677047.33 2644.72 47.25 15.62 308.21
========================================================
Total : 2709577.47 10584.29 47.23 13.05 435.91
Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: I843a5eda14e872bf6e2010e9f63b8e46d5bba691
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14174
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

	spdk_nvme_transport_get_opts;
	spdk_nvme_transport_set_opts;
	spdk_nvme_transport_id_parse;
	spdk_nvme_transport_id_populate_trstring;
	spdk_nvme_transport_id_parse_trtype;
	spdk_nvme_transport_id_trtype_str;
	spdk_nvme_transport_id_adrfam_str;
	spdk_nvme_transport_id_parse_adrfam;
	spdk_nvme_transport_id_compare;
	spdk_nvme_trid_populate_transport;
	spdk_nvme_host_id_parse;

	spdk_nvme_prchk_flags_parse;
	spdk_nvme_prchk_flags_str;

	spdk_nvme_probe;
	spdk_nvme_connect;
	spdk_nvme_connect_async;
	spdk_nvme_probe_async;
	spdk_nvme_probe_poll_async;
	spdk_nvme_detach;
	spdk_nvme_detach_async;
	spdk_nvme_detach_poll_async;
	spdk_nvme_detach_poll;

	spdk_nvme_pcie_set_hotplug_filter;

	spdk_nvme_ctrlr_is_discovery;
	spdk_nvme_ctrlr_is_fabrics;
	spdk_nvme_ctrlr_get_default_ctrlr_opts;
	spdk_nvme_ctrlr_get_opts;
	spdk_nvme_ctrlr_set_trid;
	spdk_nvme_ctrlr_reset_subsystem;
	spdk_nvme_ctrlr_reset;
	spdk_nvme_ctrlr_prepare_for_reset;
	spdk_nvme_ctrlr_reset_async;
	spdk_nvme_ctrlr_reset_poll_async;
	spdk_nvme_ctrlr_disconnect;
	spdk_nvme_ctrlr_reconnect_async;
	spdk_nvme_ctrlr_reconnect_poll_async;
	spdk_nvme_ctrlr_fail;
	spdk_nvme_ctrlr_is_failed;
	spdk_nvme_ctrlr_get_data;
	spdk_nvme_ctrlr_get_regs_csts;
	spdk_nvme_ctrlr_get_regs_cc;
	spdk_nvme_ctrlr_get_regs_cap;
	spdk_nvme_ctrlr_get_regs_vs;
	spdk_nvme_ctrlr_get_regs_cmbsz;
	spdk_nvme_ctrlr_get_regs_pmrcap;
	spdk_nvme_ctrlr_get_regs_bpinfo;
	spdk_nvme_ctrlr_get_pmrsz;
	spdk_nvme_ctrlr_get_num_ns;
	spdk_nvme_ctrlr_get_pci_device;
	spdk_nvme_ctrlr_get_max_xfer_size;
	spdk_nvme_ctrlr_is_active_ns;
	spdk_nvme_ctrlr_get_first_active_ns;
	spdk_nvme_ctrlr_get_next_active_ns;
	spdk_nvme_ctrlr_is_log_page_supported;
	spdk_nvme_ctrlr_is_feature_supported;
	spdk_nvme_ctrlr_register_aer_callback;
	spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page;
	spdk_nvme_ctrlr_register_timeout_callback;
	spdk_nvme_ctrlr_get_default_io_qpair_opts;
	spdk_nvme_ctrlr_alloc_io_qpair;
	spdk_nvme_ctrlr_connect_io_qpair;
	spdk_nvme_ctrlr_disconnect_io_qpair;
	spdk_nvme_ctrlr_reconnect_io_qpair;
	spdk_nvme_ctrlr_get_admin_qp_failure_reason;
	spdk_nvme_ctrlr_free_io_qpair;
	spdk_nvme_ctrlr_io_cmd_raw_no_payload_build;
	spdk_nvme_ctrlr_cmd_io_raw;
	spdk_nvme_ctrlr_cmd_io_raw_with_md;
	spdk_nvme_ctrlr_cmd_admin_raw;
	spdk_nvme_ctrlr_process_admin_completions;
	spdk_nvme_ctrlr_get_ns;
	spdk_nvme_ctrlr_cmd_get_log_page;
	spdk_nvme_ctrlr_cmd_get_log_page_ext;
	spdk_nvme_ctrlr_cmd_abort;
	spdk_nvme_ctrlr_cmd_abort_ext;
	spdk_nvme_ctrlr_cmd_set_feature;
	spdk_nvme_ctrlr_cmd_get_feature;
	spdk_nvme_ctrlr_cmd_get_feature_ns;
	spdk_nvme_ctrlr_cmd_set_feature_ns;
	spdk_nvme_ctrlr_cmd_security_receive;
	spdk_nvme_ctrlr_cmd_security_send;
	spdk_nvme_ctrlr_security_receive;
	spdk_nvme_ctrlr_security_send;
	spdk_nvme_ctrlr_cmd_directive_receive;
	spdk_nvme_ctrlr_cmd_directive_send;
	spdk_nvme_ctrlr_get_flags;
	spdk_nvme_ctrlr_attach_ns;
	spdk_nvme_ctrlr_detach_ns;
	spdk_nvme_ctrlr_create_ns;
	spdk_nvme_ctrlr_delete_ns;
	spdk_nvme_ctrlr_format;
	spdk_nvme_ctrlr_update_firmware;
	spdk_nvme_ctrlr_reserve_cmb;
	spdk_nvme_ctrlr_map_cmb;
	spdk_nvme_ctrlr_unmap_cmb;
	spdk_nvme_ctrlr_enable_pmr;
	spdk_nvme_ctrlr_disable_pmr;
	spdk_nvme_ctrlr_map_pmr;
	spdk_nvme_ctrlr_unmap_pmr;
	spdk_nvme_ctrlr_read_boot_partition_start;
	spdk_nvme_ctrlr_read_boot_partition_poll;
	spdk_nvme_ctrlr_write_boot_partition;
	spdk_nvme_ctrlr_get_transport_id;
	spdk_nvme_ctrlr_alloc_qid;
	spdk_nvme_ctrlr_free_qid;
	spdk_nvme_ctrlr_set_remove_cb;
	spdk_nvme_ctrlr_get_memory_domains;
	spdk_nvme_ctrlr_get_discovery_log_page;
	spdk_nvme_ctrlr_get_registers;

	spdk_nvme_poll_group_create;
	spdk_nvme_poll_group_add;
	spdk_nvme_poll_group_remove;
	spdk_nvme_poll_group_destroy;
	spdk_nvme_poll_group_process_completions;
	spdk_nvme_poll_group_get_ctx;

	spdk_nvme_ns_get_data;
	spdk_nvme_ns_get_id;
	spdk_nvme_ns_get_ctrlr;
	spdk_nvme_ns_is_active;
	spdk_nvme_ns_get_max_io_xfer_size;
	spdk_nvme_ns_get_sector_size;
	spdk_nvme_ns_get_extended_sector_size;
	spdk_nvme_ns_get_num_sectors;
	spdk_nvme_ns_get_size;
	spdk_nvme_ns_get_pi_type;
	spdk_nvme_ns_get_md_size;
	spdk_nvme_ns_get_format_index;
	spdk_nvme_ns_supports_extended_lba;
	spdk_nvme_ns_supports_compare;
	spdk_nvme_ns_get_dealloc_logical_block_read_value;
	spdk_nvme_ns_get_optimal_io_boundary;
	spdk_nvme_ns_get_nguid;
	spdk_nvme_ns_get_uuid;
	spdk_nvme_ns_get_csi;
	spdk_nvme_ns_get_flags;
	spdk_nvme_ns_get_ana_group_id;
	spdk_nvme_ns_get_ana_state;

	spdk_nvme_ns_cmd_write;
	spdk_nvme_ns_cmd_writev;
	spdk_nvme_ns_cmd_writev_with_md;
	spdk_nvme_ns_cmd_write_with_md;
	spdk_nvme_ns_cmd_write_zeroes;
	spdk_nvme_ns_cmd_write_uncorrectable;
	spdk_nvme_ns_cmd_read;
	spdk_nvme_ns_cmd_readv;
	spdk_nvme_ns_cmd_readv_with_md;
	spdk_nvme_ns_cmd_read_with_md;
	spdk_nvme_ns_cmd_dataset_management;
	spdk_nvme_ns_cmd_copy;
	spdk_nvme_ns_cmd_flush;
	spdk_nvme_ns_cmd_reservation_register;
	spdk_nvme_ns_cmd_reservation_release;
	spdk_nvme_ns_cmd_reservation_acquire;
	spdk_nvme_ns_cmd_reservation_report;
	spdk_nvme_ns_cmd_io_mgmt_recv;
	spdk_nvme_ns_cmd_io_mgmt_send;
	spdk_nvme_ns_cmd_compare;
	spdk_nvme_ns_cmd_comparev;
	spdk_nvme_ns_cmd_comparev_with_md;
	spdk_nvme_ns_cmd_compare_with_md;
	spdk_nvme_ns_cmd_writev_ext;
	spdk_nvme_ns_cmd_readv_ext;
	spdk_nvme_ns_cmd_verify;

	spdk_nvme_qpair_get_optimal_poll_group;
	spdk_nvme_qpair_process_completions;
	spdk_nvme_qpair_get_failure_reason;
	spdk_nvme_qpair_add_cmd_error_injection;
	spdk_nvme_qpair_remove_cmd_error_injection;
	spdk_nvme_qpair_print_command;
	spdk_nvme_qpair_print_completion;
	spdk_nvme_qpair_get_id;
	spdk_nvme_qpair_get_num_outstanding_reqs;

	spdk_nvme_print_command;
	spdk_nvme_print_completion;

	spdk_nvme_cpl_get_status_string;
	spdk_nvme_cpl_get_status_type_string;

	spdk_nvme_rdma_init_hooks;

	spdk_nvme_cuse_get_ctrlr_name;
	spdk_nvme_cuse_get_ns_name;
	spdk_nvme_cuse_register;
	spdk_nvme_cuse_unregister;
	spdk_nvme_cuse_update_namespaces;

	spdk_nvme_poll_group_get_stats;
	spdk_nvme_poll_group_free_stats;

	# public functions from nvme_zns.h
	spdk_nvme_zns_ns_get_data;
	spdk_nvme_zns_ns_get_zone_size_sectors;
	spdk_nvme_zns_ns_get_zone_size;
	spdk_nvme_zns_ns_get_num_zones;
	spdk_nvme_zns_ns_get_max_open_zones;
	spdk_nvme_zns_ns_get_max_active_zones;
	spdk_nvme_zns_ctrlr_get_data;
	spdk_nvme_zns_ctrlr_get_max_zone_append_size;
	spdk_nvme_zns_zone_append;
	spdk_nvme_zns_zone_append_with_md;

nvme: add support for ZNS zone append vector variant
We already have support for spdk_nvme_zns_zone_append();
add support for spdk_nvme_zns_zone_appendv() (zone append with
NVME_PAYLOAD_TYPE_SGL).
_nvme_ns_cmd_rw() currently performs verification of the SGL
if the parameter check_sgl is set. This parameter is set for all
calls with a payload of type NVME_PAYLOAD_TYPE_SGL.
In order to be able to perform the same check_sgl verification on
zone append vectors, we need to refactor _nvme_ns_cmd_rw() a bit.
Setting check_sgl ensures that _nvme_ns_cmd_split_request_sgl() or
_nvme_ns_cmd_split_request_prp() gets called.
These functions will split an oversized I/O into several different
requests. However, they also iterate over the SGE entries and verify
that the total payload size is not too large, that there are not too
many SGE entries, and that the buffers are properly aligned. A proper
request will not get split.
For zone append, splitting a request into several requests is not
allowed; however, we still want the verification to be done, such that
(e.g.) a non-first/last SGE which is not page aligned will cause
the whole request to be rejected.
(In the case of spdk_nvme_ns_cmd_write(), a non-first/last SGE which
is not page aligned will instead cause the request to be split.)
An alternative would be to try to rip out the verification part from
_nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp().
However, that is non-trivial and would most likely end up with a lot
of duplicated code, which would easily get out of sync.
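As a rough usage sketch of the new vectored variant (hedged: it assumes
spdk_nvme_zns_zone_appendv() takes the same reset_sgl/next_sge callback
pair and argument order as spdk_nvme_ns_cmd_writev(); the helper names
and single-SGE context below are illustrative):

#include <stdio.h>

#include "spdk/nvme.h"
#include "spdk/nvme_zns.h"

/* Illustrative context: one buffer exposed to the driver as a single SGE. */
struct append_sgl_ctx {
        void            *buf;
        uint32_t        len;
        uint32_t        offset;
};

static void
append_reset_sgl(void *cb_arg, uint32_t offset)
{
        struct append_sgl_ctx *ctx = cb_arg;

        ctx->offset = offset;
}

static int
append_next_sge(void *cb_arg, void **address, uint32_t *length)
{
        struct append_sgl_ctx *ctx = cb_arg;

        *address = (uint8_t *)ctx->buf + ctx->offset;
        *length = ctx->len - ctx->offset;
        return 0;
}

static void
append_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
        if (spdk_nvme_cpl_is_error(cpl)) {
                fprintf(stderr, "zone append failed\n");
        }
}

/* Queue one vectored zone append of lba_count blocks to the zone starting
 * at zslba; the data is described by the SGE callbacks above. */
static int
queue_zone_appendv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                   struct append_sgl_ctx *ctx, uint64_t zslba, uint32_t lba_count)
{
        return spdk_nvme_zns_zone_appendv(ns, qpair, zslba, lba_count,
                                          append_done, ctx, 0 /* io_flags */,
                                          append_reset_sgl, append_next_sge);
}
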
Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
Change-Id: I2728acdcadeb70b1f0ed628704df19e75d14dcca
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6248
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

	spdk_nvme_zns_zone_appendv;
	spdk_nvme_zns_zone_appendv_with_md;
	spdk_nvme_zns_close_zone;
	spdk_nvme_zns_finish_zone;
	spdk_nvme_zns_open_zone;
	spdk_nvme_zns_reset_zone;
	spdk_nvme_zns_offline_zone;
	spdk_nvme_zns_set_zone_desc_ext;
	spdk_nvme_zns_report_zones;
	spdk_nvme_zns_ext_report_zones;

	# public functions from nvme_ocssd.h
	spdk_nvme_ctrlr_is_ocssd_supported;
	spdk_nvme_ocssd_ctrlr_cmd_geometry;
	spdk_nvme_ocssd_ns_cmd_vector_reset;
	spdk_nvme_ocssd_ns_cmd_vector_write;
	spdk_nvme_ocssd_ns_cmd_vector_write_with_md;
	spdk_nvme_ocssd_ns_cmd_vector_read;
	spdk_nvme_ocssd_ns_cmd_vector_read_with_md;
	spdk_nvme_ocssd_ns_cmd_vector_copy;

	# public functions from opal.h
	spdk_opal_dev_construct;
	spdk_opal_dev_destruct;
	spdk_opal_get_d0_features_info;
	spdk_opal_cmd_take_ownership;
	spdk_opal_cmd_revert_tper;
	spdk_opal_cmd_activate_locking_sp;
	spdk_opal_cmd_lock_unlock;
	spdk_opal_cmd_setup_locking_range;
	spdk_opal_cmd_get_max_ranges;
	spdk_opal_cmd_get_locking_range_info;
	spdk_opal_cmd_enable_user;
	spdk_opal_cmd_add_user_to_locking_range;
	spdk_opal_cmd_set_new_passwd;
	spdk_opal_cmd_erase_locking_range;
	spdk_opal_cmd_secure_erase_locking_range;
	spdk_opal_get_locking_range_info;
	spdk_opal_free_locking_range_info;

	local: *;
};