/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_BDEV_NVME_H
#define SPDK_BDEV_NVME_H

#include "spdk/stdinc.h"

#include "spdk/queue.h"
#include "spdk/nvme.h"
#include "spdk/bdev_module.h"

TAILQ_HEAD(nvme_bdev_ctrlrs, nvme_bdev_ctrlr);

/* Global list of all nvme_bdev_ctrlr objects; defined in the module's .c file. */
extern struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs;
/* Serializes access to the module's global state (see implementation for exact scope). */
extern pthread_mutex_t g_bdev_nvme_mutex;
/* True once the bdev_nvme module has completed its finish/teardown sequence. */
extern bool g_bdev_nvme_module_finish;

#define NVME_MAX_CONTROLLERS 1024
/*
 * Multipath I/O distribution policy for an NVMe bdev with multiple paths.
 */
enum bdev_nvme_multipath_policy {
	/* All I/O is submitted to a single preferred path. */
	BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
	/* I/O may be distributed across all active optimized paths. */
	BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
};
/* Completion callback for bdev_nvme_create(): reports how many bdevs were
 * created on top of the attached controller and the overall result code. */
typedef void (*spdk_bdev_create_nvme_fn)(void *ctx, size_t bdev_count, int rc);
/* Completion callback for bdev_nvme_start_discovery(). */
typedef void (*spdk_bdev_nvme_start_discovery_fn)(void *ctx, int status);
/* Completion callback for bdev_nvme_stop_discovery(). */
typedef void (*spdk_bdev_nvme_stop_discovery_fn)(void *ctx);
/*
 * bdev_nvme-level per-controller options, kept separate from the NVMe driver's
 * own spdk_nvme_ctrlr_opts.
 */
struct nvme_ctrlr_opts {
	/* Protection information check flags (PRCHK) applied to I/O. */
	uint32_t prchk_flags;
	/* Seconds to keep retrying reconnect before deleting the controller;
	 * signed, so a negative value is representable (presumably "retry
	 * forever" — confirm against the implementation). */
	int32_t ctrlr_loss_timeout_sec;
	/* Delay in seconds between reconnect attempts. */
	uint32_t reconnect_delay_sec;
	/* Seconds after which queued I/O is failed fast while disconnected. */
	uint32_t fast_io_fail_timeout_sec;
	/* True when this controller was attached via the discovery service. */
	bool from_discovery_service;
};
|
|
|
|
|
2021-08-30 18:30:18 +00:00
|
|
|
struct nvme_async_probe_ctx {
|
|
|
|
struct spdk_nvme_probe_ctx *probe_ctx;
|
|
|
|
const char *base_name;
|
|
|
|
const char **names;
|
|
|
|
uint32_t count;
|
|
|
|
struct spdk_poller *poller;
|
|
|
|
struct spdk_nvme_transport_id trid;
|
2022-03-04 04:51:53 +00:00
|
|
|
struct nvme_ctrlr_opts bdev_opts;
|
2022-03-08 08:56:35 +00:00
|
|
|
struct spdk_nvme_ctrlr_opts drv_opts;
|
2021-08-30 18:30:18 +00:00
|
|
|
spdk_bdev_create_nvme_fn cb_fn;
|
|
|
|
void *cb_ctx;
|
|
|
|
uint32_t populates_in_progress;
|
|
|
|
bool ctrlr_attached;
|
|
|
|
bool probe_done;
|
|
|
|
bool namespaces_populated;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct nvme_ns {
|
2021-09-02 12:33:32 +00:00
|
|
|
uint32_t id;
|
|
|
|
struct spdk_nvme_ns *ns;
|
|
|
|
struct nvme_ctrlr *ctrlr;
|
|
|
|
struct nvme_bdev *bdev;
|
|
|
|
uint32_t ana_group_id;
|
|
|
|
enum spdk_nvme_ana_state ana_state;
|
2021-10-18 22:01:30 +00:00
|
|
|
bool ana_state_updating;
|
2022-04-07 08:29:05 +00:00
|
|
|
bool ana_transition_timedout;
|
|
|
|
struct spdk_poller *anatt_timer;
|
2021-09-02 12:33:32 +00:00
|
|
|
struct nvme_async_probe_ctx *probe_ctx;
|
2021-09-27 23:15:02 +00:00
|
|
|
TAILQ_ENTRY(nvme_ns) tailq;
|
2021-08-25 16:58:16 +00:00
|
|
|
RB_ENTRY(nvme_ns) node;
|
2021-08-30 18:30:18 +00:00
|
|
|
};
/* Opaque/forward declarations; full definitions live below or in the .c file. */
struct nvme_bdev_io;
struct nvme_bdev_ctrlr;
struct nvme_bdev;
struct nvme_io_path;
|
2021-08-30 18:30:18 +00:00
|
|
|
|
2021-09-14 18:26:50 +00:00
|
|
|
struct nvme_path_id {
|
2021-08-30 18:30:18 +00:00
|
|
|
struct spdk_nvme_transport_id trid;
|
2021-10-11 20:35:39 +00:00
|
|
|
struct spdk_nvme_host_id hostid;
|
2021-09-14 18:26:50 +00:00
|
|
|
TAILQ_ENTRY(nvme_path_id) link;
|
2021-08-30 18:30:18 +00:00
|
|
|
bool is_failed;
|
|
|
|
};
/* Completion callback for a controller reset; success reports the outcome. */
typedef void (*bdev_nvme_reset_cb)(void *cb_arg, bool success);
/* Invoked when the controller's admin connection has been disconnected. */
typedef void (*nvme_ctrlr_disconnected_cb)(struct nvme_ctrlr *nvme_ctrlr);
|
2021-08-30 18:30:18 +00:00
|
|
|
|
|
|
|
struct nvme_ctrlr {
|
|
|
|
/**
|
|
|
|
* points to pinned, physically contiguous memory region;
|
|
|
|
* contains 4KB IDENTIFY structure for controller which is
|
|
|
|
* target for CONTROLLER IDENTIFY command during initialization
|
|
|
|
*/
|
|
|
|
struct spdk_nvme_ctrlr *ctrlr;
|
2021-09-21 19:17:12 +00:00
|
|
|
struct nvme_path_id *active_path_id;
|
2021-08-30 18:30:18 +00:00
|
|
|
int ref;
|
2021-10-08 03:29:29 +00:00
|
|
|
|
|
|
|
uint32_t resetting : 1;
|
2022-01-13 07:03:36 +00:00
|
|
|
uint32_t reconnect_is_delayed : 1;
|
2022-01-14 02:02:31 +00:00
|
|
|
uint32_t fast_io_fail_timedout : 1;
|
2021-10-08 03:29:29 +00:00
|
|
|
uint32_t destruct : 1;
|
2021-09-16 05:24:37 +00:00
|
|
|
uint32_t ana_log_page_updating : 1;
|
2022-08-09 01:46:48 +00:00
|
|
|
uint32_t io_path_cache_clearing : 1;
|
2022-03-04 04:51:53 +00:00
|
|
|
|
|
|
|
struct nvme_ctrlr_opts opts;
|
|
|
|
|
2021-08-25 16:58:16 +00:00
|
|
|
RB_HEAD(nvme_ns_tree, nvme_ns) namespaces;
|
2021-08-30 18:30:18 +00:00
|
|
|
|
|
|
|
struct spdk_opal_dev *opal_dev;
|
|
|
|
|
|
|
|
struct spdk_poller *adminq_timer_poller;
|
|
|
|
struct spdk_thread *thread;
|
|
|
|
|
|
|
|
bdev_nvme_reset_cb reset_cb_fn;
|
|
|
|
void *reset_cb_arg;
|
2021-07-09 08:51:32 +00:00
|
|
|
/* Poller used to check for reset/detach completion */
|
|
|
|
struct spdk_poller *reset_detach_poller;
|
|
|
|
struct spdk_nvme_detach_ctx *detach_ctx;
|
2021-08-30 18:30:18 +00:00
|
|
|
|
2022-01-13 07:03:36 +00:00
|
|
|
uint64_t reset_start_tsc;
|
|
|
|
struct spdk_poller *reconnect_delay_timer;
|
|
|
|
|
2022-01-28 00:34:10 +00:00
|
|
|
nvme_ctrlr_disconnected_cb disconnected_cb;
|
|
|
|
|
2021-08-30 18:30:18 +00:00
|
|
|
/** linked list pointer for device list */
|
|
|
|
TAILQ_ENTRY(nvme_ctrlr) tailq;
|
2021-09-07 16:13:07 +00:00
|
|
|
struct nvme_bdev_ctrlr *nbdev_ctrlr;
|
2021-08-30 18:30:18 +00:00
|
|
|
|
2021-09-14 18:26:50 +00:00
|
|
|
TAILQ_HEAD(nvme_paths, nvme_path_id) trids;
|
2021-08-30 18:30:18 +00:00
|
|
|
|
2022-07-13 04:57:25 +00:00
|
|
|
uint32_t max_ana_log_page_size;
|
2021-08-30 18:30:18 +00:00
|
|
|
struct spdk_nvme_ana_page *ana_log_page;
|
|
|
|
struct spdk_nvme_ana_group_descriptor *copied_ana_desc;
|
|
|
|
|
|
|
|
struct nvme_async_probe_ctx *probe_ctx;
|
|
|
|
|
|
|
|
pthread_mutex_t mutex;
|
|
|
|
};
|
|
|
|
|
2021-09-07 16:13:07 +00:00
|
|
|
struct nvme_bdev_ctrlr {
|
|
|
|
char *name;
|
|
|
|
TAILQ_HEAD(, nvme_ctrlr) ctrlrs;
|
2021-09-27 23:15:02 +00:00
|
|
|
TAILQ_HEAD(, nvme_bdev) bdevs;
|
2021-09-07 16:13:07 +00:00
|
|
|
TAILQ_ENTRY(nvme_bdev_ctrlr) tailq;
|
|
|
|
};
|
|
|
|
|
2021-08-30 18:30:18 +00:00
|
|
|
struct nvme_bdev {
|
2022-04-29 05:37:35 +00:00
|
|
|
struct spdk_bdev disk;
|
|
|
|
uint32_t nsid;
|
|
|
|
struct nvme_bdev_ctrlr *nbdev_ctrlr;
|
|
|
|
pthread_mutex_t mutex;
|
|
|
|
int ref;
|
|
|
|
enum bdev_nvme_multipath_policy mp_policy;
|
|
|
|
TAILQ_HEAD(, nvme_ns) nvme_ns_list;
|
|
|
|
bool opal;
|
|
|
|
TAILQ_ENTRY(nvme_bdev) tailq;
|
2021-08-30 18:30:18 +00:00
|
|
|
};
/*
 * Per-poll-group I/O qpair state for one controller.
 */
struct nvme_qpair {
	struct nvme_ctrlr *ctrlr;
	struct spdk_nvme_qpair *qpair;
	/* Poll group this qpair is attached to. */
	struct nvme_poll_group *group;
	/* Back-pointer to the controller channel owning this qpair. */
	struct nvme_ctrlr_channel *ctrlr_ch;

	/* The following is used to update io_path cache of nvme_bdev_channels. */
	TAILQ_HEAD(, nvme_io_path) io_path_list;

	/* Link in nvme_poll_group::qpair_list. */
	TAILQ_ENTRY(nvme_qpair) tailq;
};
/*
 * Per-channel state of one nvme_ctrlr (created per I/O channel).
 */
struct nvme_ctrlr_channel {
	struct nvme_qpair *qpair;
	/* bdev I/Os parked here while a reset is in progress. */
	TAILQ_HEAD(, spdk_bdev_io) pending_resets;

	/* In-flight for-each iteration of an ongoing reset, if any. */
	struct spdk_io_channel_iter *reset_iter;
};
/*
 * One candidate (namespace, qpair) pair through which a bdev channel can
 * submit I/O; a channel holds one io_path per available path.
 */
struct nvme_io_path {
	struct nvme_ns *nvme_ns;
	struct nvme_qpair *qpair;
	/* Link in nvme_bdev_channel::io_path_list. */
	STAILQ_ENTRY(nvme_io_path) stailq;

	/* The following are used to update io_path cache of the nvme_bdev_channel. */
	struct nvme_bdev_channel *nbdev_ch;
	/* Link in nvme_qpair::io_path_list. */
	TAILQ_ENTRY(nvme_io_path) tailq;
};
|
|
|
|
|
|
|
|
struct nvme_bdev_channel {
|
2021-10-15 10:28:09 +00:00
|
|
|
struct nvme_io_path *current_io_path;
|
2022-04-29 05:37:35 +00:00
|
|
|
enum bdev_nvme_multipath_policy mp_policy;
|
2021-10-07 22:11:17 +00:00
|
|
|
STAILQ_HEAD(, nvme_io_path) io_path_list;
|
|
|
|
TAILQ_HEAD(retry_io_head, spdk_bdev_io) retry_io_list;
|
|
|
|
struct spdk_poller *retry_io_poller;
|
2021-08-30 18:30:18 +00:00
|
|
|
};
/*
 * Per-thread poll group driving completions for all qpairs on that thread.
 */
struct nvme_poll_group {
	struct spdk_nvme_poll_group *group;
	/* Channel to the acceleration framework (e.g. for data ops). */
	struct spdk_io_channel *accel_channel;
	/* Poller that processes completions for this group. */
	struct spdk_poller *poller;
	/* Spin-time statistics collection; tick fields below are raw TSC values. */
	bool collect_spin_stat;
	uint64_t spin_ticks;
	uint64_t start_ticks;
	uint64_t end_ticks;
	/* All nvme_qpairs attached to this group. */
	TAILQ_HEAD(, nvme_qpair) qpair_list;
};
/* Write JSON describing the given I/O path (used by RPC/dump code). */
void nvme_io_path_info_json(struct spdk_json_write_ctx *w, struct nvme_io_path *io_path);

/* Look up an nvme_ctrlr by name; presumably returns NULL when not found —
 * confirm against the implementation. */
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);

struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get_by_name(const char *name);

/* Callback type for nvme_bdev_ctrlr_for_each(). */
typedef void (*nvme_bdev_ctrlr_for_each_fn)(struct nvme_bdev_ctrlr *nbdev_ctrlr, void *ctx);

/* Invoke fn once for every registered nvme_bdev_ctrlr. */
void nvme_bdev_ctrlr_for_each(nvme_bdev_ctrlr_for_each_fn fn, void *ctx);

/* Dump a transport ID as JSON. */
void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
			      struct spdk_json_write_ctx *w);

/* Write JSON describing the given controller. */
void nvme_ctrlr_info_json(struct spdk_json_write_ctx *w, struct nvme_ctrlr *nvme_ctrlr);

/* Namespace lookup/iteration over a controller's active namespaces. */
struct nvme_ns *nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid);
struct nvme_ns *nvme_ctrlr_get_first_active_ns(struct nvme_ctrlr *nvme_ctrlr);
struct nvme_ns *nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns);
/*
 * Action taken when an I/O exceeds its timeout.
 */
enum spdk_bdev_timeout_action {
	/* Do nothing on timeout. */
	SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE = 0,
	/* Reset the controller on timeout. */
	SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET,
	/* Attempt to abort the timed-out command. */
	SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT,
};
|
|
|
|
|
|
|
|
struct spdk_bdev_nvme_opts {
|
|
|
|
enum spdk_bdev_timeout_action action_on_timeout;
|
|
|
|
uint64_t timeout_us;
|
2021-05-26 20:43:22 +00:00
|
|
|
uint64_t timeout_admin_us;
|
2020-11-27 16:10:36 +00:00
|
|
|
uint32_t keep_alive_timeout_ms;
|
2021-10-18 19:33:01 +00:00
|
|
|
/* The number of attempts per I/O in the transport layer before an I/O fails. */
|
|
|
|
uint32_t transport_retry_count;
|
2019-09-03 03:48:49 +00:00
|
|
|
uint32_t arbitration_burst;
|
|
|
|
uint32_t low_priority_weight;
|
|
|
|
uint32_t medium_priority_weight;
|
|
|
|
uint32_t high_priority_weight;
|
2018-07-09 21:04:33 +00:00
|
|
|
uint64_t nvme_adminq_poll_period_us;
|
2019-03-11 22:26:53 +00:00
|
|
|
uint64_t nvme_ioq_poll_period_us;
|
2019-07-10 05:13:31 +00:00
|
|
|
uint32_t io_queue_requests;
|
2019-11-18 16:59:36 +00:00
|
|
|
bool delay_cmd_submit;
|
2021-10-25 02:59:46 +00:00
|
|
|
/* The number of attempts per I/O in the bdev layer before an I/O fails. */
|
|
|
|
int32_t bdev_retry_count;
|
2022-01-20 12:22:06 +00:00
|
|
|
uint8_t transport_ack_timeout;
|
2022-03-09 12:04:14 +00:00
|
|
|
int32_t ctrlr_loss_timeout_sec;
|
|
|
|
uint32_t reconnect_delay_sec;
|
|
|
|
uint32_t fast_io_fail_timeout_sec;
|
2022-05-02 03:07:02 +00:00
|
|
|
bool disable_auto_failback;
|
2018-07-09 21:04:33 +00:00
|
|
|
};
|
|
|
|
|
2021-07-07 01:02:14 +00:00
|
|
|
struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch);
|
2020-05-10 07:46:07 +00:00
|
|
|
void bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts);
|
|
|
|
int bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts);
|
|
|
|
int bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx);
|
2018-07-09 21:04:33 +00:00
|
|
|
|
2022-03-09 12:04:14 +00:00
|
|
|
void bdev_nvme_get_default_ctrlr_opts(struct nvme_ctrlr_opts *opts);
|
|
|
|
|
2020-05-10 07:46:07 +00:00
|
|
|
int bdev_nvme_create(struct spdk_nvme_transport_id *trid,
|
|
|
|
const char *base_name,
|
|
|
|
const char **names,
|
|
|
|
uint32_t count,
|
|
|
|
spdk_bdev_create_nvme_fn cb_fn,
|
2021-01-21 11:53:16 +00:00
|
|
|
void *cb_ctx,
|
2022-03-08 08:56:35 +00:00
|
|
|
struct spdk_nvme_ctrlr_opts *drv_opts,
|
2022-03-04 04:51:53 +00:00
|
|
|
struct nvme_ctrlr_opts *bdev_opts,
|
|
|
|
bool multipath);
|
2021-12-04 05:49:54 +00:00
|
|
|
|
|
|
|
int bdev_nvme_start_discovery(struct spdk_nvme_transport_id *trid, const char *base_name,
|
2022-03-25 19:08:00 +00:00
|
|
|
struct spdk_nvme_ctrlr_opts *drv_opts, struct nvme_ctrlr_opts *bdev_opts,
|
2022-05-12 09:52:58 +00:00
|
|
|
uint64_t timeout, spdk_bdev_nvme_start_discovery_fn cb_fn, void *cb_ctx);
|
2021-12-22 22:33:25 +00:00
|
|
|
int bdev_nvme_stop_discovery(const char *name, spdk_bdev_nvme_stop_discovery_fn cb_fn,
|
|
|
|
void *cb_ctx);
|
2022-04-26 10:01:49 +00:00
|
|
|
void bdev_nvme_get_discovery_info(struct spdk_json_write_ctx *w);
|
2021-12-04 05:49:54 +00:00
|
|
|
|
2020-05-10 07:46:07 +00:00
|
|
|
struct spdk_nvme_ctrlr *bdev_nvme_get_ctrlr(struct spdk_bdev *bdev);
/**
 * Delete NVMe controller with all bdevs on top of it, or delete the specified path
 * if there is any alternative path. Requires to pass name of NVMe controller.
 *
 * \param name NVMe controller name
 * \param path_id The specified path to remove (optional)
 * \return zero on success, -EINVAL on wrong parameters or -ENODEV if controller is not found
 */
int bdev_nvme_delete(const char *name, const struct nvme_path_id *path_id);
|
2018-07-26 12:54:20 +00:00
|
|
|
|
2021-06-16 17:24:56 +00:00
|
|
|
/**
|
|
|
|
* Reset NVMe controller.
|
|
|
|
*
|
|
|
|
* \param nvme_ctrlr The specified NVMe controller to reset
|
|
|
|
* \param cb_fn Function to be called back after reset completes
|
|
|
|
* \param cb_arg Argument for callback function
|
|
|
|
* \return zero on success. Negated errno on the following error conditions:
|
2021-07-20 15:36:17 +00:00
|
|
|
* -ENXIO: controller is being destroyed.
|
|
|
|
* -EBUSY: controller is already being reset.
|
2021-06-16 17:24:56 +00:00
|
|
|
*/
|
|
|
|
int bdev_nvme_reset_rpc(struct nvme_ctrlr *nvme_ctrlr, bdev_nvme_reset_cb cb_fn, void *cb_arg);
/* Completion callback for bdev_nvme_set_preferred_path(). */
typedef void (*bdev_nvme_set_preferred_path_cb)(void *cb_arg, int rc);

/**
 * Set the preferred I/O path for an NVMe bdev in multipath mode.
 *
 * NOTE: This function does not support NVMe bdevs in failover mode.
 *
 * \param name NVMe bdev name
 * \param cntlid NVMe-oF controller ID
 * \param cb_fn Function to be called back after completion.
 * \param cb_arg Argument for callback function.
 */
void bdev_nvme_set_preferred_path(const char *name, uint16_t cntlid,
				  bdev_nvme_set_preferred_path_cb cb_fn, void *cb_arg);
/* Completion callback for bdev_nvme_set_multipath_policy(). */
typedef void (*bdev_nvme_set_multipath_policy_cb)(void *cb_arg, int rc);

/**
 * Set multipath policy of the NVMe bdev.
 *
 * \param name NVMe bdev name
 * \param policy Multipath policy (active-passive or active-active)
 * \param cb_fn Function to be called back after completion.
 * \param cb_arg Argument for callback function.
 */
void bdev_nvme_set_multipath_policy(const char *name,
				    enum bdev_nvme_multipath_policy policy,
				    bdev_nvme_set_multipath_policy_cb cb_fn,
				    void *cb_arg);

#endif /* SPDK_BDEV_NVME_H */