spdk/lib/nvme/spdk_nvme.map


{
global:
# public functions from nvme.h
spdk_nvme_transport_register;
spdk_nvme_transport_available;
spdk_nvme_transport_available_by_name;
spdk_nvme_transport_get_opts;
spdk_nvme_transport_set_opts;
spdk_nvme_transport_id_parse;
spdk_nvme_transport_id_populate_trstring;
spdk_nvme_transport_id_parse_trtype;
spdk_nvme_transport_id_trtype_str;
spdk_nvme_transport_id_adrfam_str;
spdk_nvme_transport_id_parse_adrfam;
spdk_nvme_transport_id_compare;
spdk_nvme_trid_populate_transport;
spdk_nvme_host_id_parse;
spdk_nvme_prchk_flags_parse;
spdk_nvme_prchk_flags_str;
spdk_nvme_probe;
spdk_nvme_connect;
spdk_nvme_connect_async;
spdk_nvme_probe_async;
spdk_nvme_probe_poll_async;
spdk_nvme_detach;
spdk_nvme_detach_async;
spdk_nvme_detach_poll_async;
spdk_nvme_detach_poll;
spdk_nvme_pcie_set_hotplug_filter;
spdk_nvme_ctrlr_is_discovery;
spdk_nvme_ctrlr_is_fabrics;
spdk_nvme_ctrlr_get_default_ctrlr_opts;
spdk_nvme_ctrlr_get_opts;
spdk_nvme_ctrlr_set_trid;
spdk_nvme_ctrlr_reset_subsystem;
spdk_nvme_ctrlr_reset;
spdk_nvme_ctrlr_prepare_for_reset;
spdk_nvme_ctrlr_reset_async;
spdk_nvme_ctrlr_reset_poll_async;
spdk_nvme_ctrlr_disconnect;
spdk_nvme_ctrlr_reconnect_async;
spdk_nvme_ctrlr_reconnect_poll_async;
spdk_nvme_ctrlr_fail;
spdk_nvme_ctrlr_is_failed;
spdk_nvme_ctrlr_get_data;
spdk_nvme_ctrlr_get_regs_csts;
spdk_nvme_ctrlr_get_regs_cc;
spdk_nvme_ctrlr_get_regs_cap;
spdk_nvme_ctrlr_get_regs_vs;
spdk_nvme_ctrlr_get_regs_cmbsz;
spdk_nvme_ctrlr_get_regs_pmrcap;
spdk_nvme_ctrlr_get_regs_bpinfo;
spdk_nvme_ctrlr_get_pmrsz;
spdk_nvme_ctrlr_get_num_ns;
spdk_nvme_ctrlr_get_pci_device;
spdk_nvme_ctrlr_get_max_xfer_size;
spdk_nvme_ctrlr_is_active_ns;
spdk_nvme_ctrlr_get_first_active_ns;
spdk_nvme_ctrlr_get_next_active_ns;
spdk_nvme_ctrlr_is_log_page_supported;
spdk_nvme_ctrlr_is_feature_supported;
spdk_nvme_ctrlr_register_aer_callback;
spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page;
spdk_nvme_ctrlr_register_timeout_callback;
spdk_nvme_ctrlr_get_default_io_qpair_opts;
spdk_nvme_ctrlr_alloc_io_qpair;
spdk_nvme_ctrlr_connect_io_qpair;
spdk_nvme_ctrlr_disconnect_io_qpair;
spdk_nvme_ctrlr_reconnect_io_qpair;
spdk_nvme_ctrlr_get_admin_qp_failure_reason;
spdk_nvme_ctrlr_free_io_qpair;
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build;
spdk_nvme_ctrlr_cmd_io_raw;
spdk_nvme_ctrlr_cmd_io_raw_with_md;
spdk_nvme_ctrlr_cmd_admin_raw;
spdk_nvme_ctrlr_process_admin_completions;
spdk_nvme_ctrlr_get_ns;
spdk_nvme_ctrlr_cmd_get_log_page;
spdk_nvme_ctrlr_cmd_get_log_page_ext;
spdk_nvme_ctrlr_cmd_abort;
spdk_nvme_ctrlr_cmd_abort_ext;
spdk_nvme_ctrlr_cmd_set_feature;
spdk_nvme_ctrlr_cmd_get_feature;
spdk_nvme_ctrlr_cmd_get_feature_ns;
spdk_nvme_ctrlr_cmd_set_feature_ns;
spdk_nvme_ctrlr_cmd_security_receive;
spdk_nvme_ctrlr_cmd_security_send;
spdk_nvme_ctrlr_security_receive;
spdk_nvme_ctrlr_security_send;
spdk_nvme_ctrlr_cmd_directive_receive;
spdk_nvme_ctrlr_cmd_directive_send;
spdk_nvme_ctrlr_get_flags;
spdk_nvme_ctrlr_attach_ns;
spdk_nvme_ctrlr_detach_ns;
spdk_nvme_ctrlr_create_ns;
spdk_nvme_ctrlr_delete_ns;
spdk_nvme_ctrlr_format;
spdk_nvme_ctrlr_update_firmware;
spdk_nvme_ctrlr_reserve_cmb;
spdk_nvme_ctrlr_map_cmb;
spdk_nvme_ctrlr_unmap_cmb;
spdk_nvme_ctrlr_enable_pmr;
spdk_nvme_ctrlr_disable_pmr;
spdk_nvme_ctrlr_map_pmr;
spdk_nvme_ctrlr_unmap_pmr;
spdk_nvme_ctrlr_read_boot_partition_start;
spdk_nvme_ctrlr_read_boot_partition_poll;
spdk_nvme_ctrlr_write_boot_partition;
spdk_nvme_ctrlr_get_transport_id;
spdk_nvme_ctrlr_alloc_qid;
spdk_nvme_ctrlr_free_qid;
spdk_nvme_ctrlr_set_remove_cb;
spdk_nvme_ctrlr_get_memory_domains;
spdk_nvme_ctrlr_get_discovery_log_page;
spdk_nvme_ctrlr_get_registers;
spdk_nvme_poll_group_create;
spdk_nvme_poll_group_add;
spdk_nvme_poll_group_remove;
spdk_nvme_poll_group_destroy;
spdk_nvme_poll_group_process_completions;
spdk_nvme_poll_group_get_ctx;
spdk_nvme_ns_get_data;
spdk_nvme_ns_get_id;
spdk_nvme_ns_get_ctrlr;
spdk_nvme_ns_is_active;
spdk_nvme_ns_get_max_io_xfer_size;
spdk_nvme_ns_get_sector_size;
spdk_nvme_ns_get_extended_sector_size;
spdk_nvme_ns_get_num_sectors;
spdk_nvme_ns_get_size;
spdk_nvme_ns_get_pi_type;
spdk_nvme_ns_get_md_size;
spdk_nvme_ns_get_format_index;
spdk_nvme_ns_supports_extended_lba;
spdk_nvme_ns_supports_compare;
spdk_nvme_ns_get_dealloc_logical_block_read_value;
spdk_nvme_ns_get_optimal_io_boundary;
spdk_nvme_ns_get_nguid;
spdk_nvme_ns_get_uuid;
spdk_nvme_ns_get_csi;
spdk_nvme_ns_get_flags;
spdk_nvme_ns_get_ana_group_id;
spdk_nvme_ns_get_ana_state;
spdk_nvme_ns_cmd_write;
spdk_nvme_ns_cmd_writev;
spdk_nvme_ns_cmd_writev_with_md;
spdk_nvme_ns_cmd_write_with_md;
spdk_nvme_ns_cmd_write_zeroes;
spdk_nvme_ns_cmd_write_uncorrectable;
spdk_nvme_ns_cmd_read;
spdk_nvme_ns_cmd_readv;
spdk_nvme_ns_cmd_readv_with_md;
spdk_nvme_ns_cmd_read_with_md;
spdk_nvme_ns_cmd_dataset_management;
spdk_nvme_ns_cmd_copy;
spdk_nvme_ns_cmd_flush;
spdk_nvme_ns_cmd_reservation_register;
spdk_nvme_ns_cmd_reservation_release;
spdk_nvme_ns_cmd_reservation_acquire;
spdk_nvme_ns_cmd_reservation_report;
spdk_nvme_ns_cmd_io_mgmt_recv;
spdk_nvme_ns_cmd_io_mgmt_send;
spdk_nvme_ns_cmd_compare;
spdk_nvme_ns_cmd_comparev;
spdk_nvme_ns_cmd_comparev_with_md;
spdk_nvme_ns_cmd_compare_with_md;
spdk_nvme_ns_cmd_writev_ext;
spdk_nvme_ns_cmd_readv_ext;
spdk_nvme_ns_cmd_verify;
spdk_nvme_qpair_get_optimal_poll_group;
spdk_nvme_qpair_process_completions;
spdk_nvme_qpair_get_failure_reason;
spdk_nvme_qpair_add_cmd_error_injection;
spdk_nvme_qpair_remove_cmd_error_injection;
spdk_nvme_qpair_print_command;
spdk_nvme_qpair_print_completion;
spdk_nvme_qpair_get_id;
spdk_nvme_qpair_get_num_outstanding_reqs;
spdk_nvme_print_command;
spdk_nvme_print_completion;
spdk_nvme_cpl_get_status_string;
spdk_nvme_cpl_get_status_type_string;
spdk_nvme_rdma_init_hooks;
spdk_nvme_cuse_get_ctrlr_name;
spdk_nvme_cuse_get_ns_name;
spdk_nvme_cuse_register;
spdk_nvme_cuse_unregister;
spdk_nvme_cuse_update_namespaces;
spdk_nvme_poll_group_get_stats;
spdk_nvme_poll_group_free_stats;
# public functions from nvme_zns.h
spdk_nvme_zns_ns_get_data;
spdk_nvme_zns_ns_get_zone_size_sectors;
spdk_nvme_zns_ns_get_zone_size;
spdk_nvme_zns_ns_get_num_zones;
spdk_nvme_zns_ns_get_max_open_zones;
spdk_nvme_zns_ns_get_max_active_zones;
spdk_nvme_zns_ctrlr_get_data;
spdk_nvme_zns_ctrlr_get_max_zone_append_size;
spdk_nvme_zns_zone_append;
spdk_nvme_zns_zone_append_with_md;
spdk_nvme_zns_zone_appendv;
spdk_nvme_zns_zone_appendv_with_md;
spdk_nvme_zns_close_zone;
spdk_nvme_zns_finish_zone;
spdk_nvme_zns_open_zone;
spdk_nvme_zns_reset_zone;
spdk_nvme_zns_offline_zone;
spdk_nvme_zns_set_zone_desc_ext;
spdk_nvme_zns_report_zones;
spdk_nvme_zns_ext_report_zones;
# public functions from nvme_ocssd.h
spdk_nvme_ctrlr_is_ocssd_supported;
spdk_nvme_ocssd_ctrlr_cmd_geometry;
spdk_nvme_ocssd_ns_cmd_vector_reset;
spdk_nvme_ocssd_ns_cmd_vector_write;
spdk_nvme_ocssd_ns_cmd_vector_write_with_md;
spdk_nvme_ocssd_ns_cmd_vector_read;
spdk_nvme_ocssd_ns_cmd_vector_read_with_md;
spdk_nvme_ocssd_ns_cmd_vector_copy;
# public functions from opal.h
spdk_opal_dev_construct;
spdk_opal_dev_destruct;
spdk_opal_get_d0_features_info;
spdk_opal_cmd_take_ownership;
spdk_opal_cmd_revert_tper;
spdk_opal_cmd_activate_locking_sp;
spdk_opal_cmd_lock_unlock;
spdk_opal_cmd_setup_locking_range;
spdk_opal_cmd_get_max_ranges;
spdk_opal_cmd_get_locking_range_info;
spdk_opal_cmd_enable_user;
spdk_opal_cmd_add_user_to_locking_range;
spdk_opal_cmd_set_new_passwd;
spdk_opal_cmd_erase_locking_range;
spdk_opal_cmd_secure_erase_locking_range;
spdk_opal_get_locking_range_info;
spdk_opal_free_locking_range_info;
local: *;
};