/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "bdev_nvme.h"

#include "spdk/accel_engine.h"
#include "spdk/config.h"
#include "spdk/endian.h"
#include "spdk/bdev.h"
#include "spdk/json.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/nvme_zns.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"

#define SPDK_BDEV_NVME_DEFAULT_DELAY_CMD_SUBMIT true
#define SPDK_BDEV_NVME_DEFAULT_KEEP_ALIVE_TIMEOUT_IN_MS (10000)

static int bdev_nvme_config_json(struct spdk_json_write_ctx *w);

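/*
 * Per-I/O driver context. It lives in spdk_bdev_io->driver_ctx; its size is
 * reported to the bdev layer by bdev_nvme_get_ctx_size() below.
 */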
struct nvme_bdev_io {
	/** array of iovecs to transfer. */
	struct iovec *iovs;

	/** Number of iovecs in iovs array. */
	int iovcnt;

	/** Current iovec position. */
	int iovpos;

	/** Offset in current iovec. */
	uint32_t iov_offset;

	/** array of iovecs to transfer. */
	struct iovec *fused_iovs;

	/** Number of iovecs in fused_iovs array. */
	int fused_iovcnt;

	/** Current iovec position. */
	int fused_iovpos;

	/** Offset in current iovec. */
	uint32_t fused_iov_offset;

	/** Saved status for admin passthru completion event, PI error verification,
	 *  or intermediate compare-and-write status.
	 */
	struct spdk_nvme_cpl cpl;

	/** Extended IO opts passed by the user to the bdev layer and mapped to NVMe format */
	struct spdk_nvme_ns_cmd_ext_io_opts ext_opts;

	/** Originating thread */
	struct spdk_thread *orig_thread;

	/** Keeps track of whether the first of the fused commands was submitted */
	bool first_fused_submitted;

	/** Temporary pointer to zone report buffer */
	struct spdk_nvme_zns_zone_report *zone_report_buf;

	/** Keep track of how many zones have been copied to the spdk_bdev_zone_info struct */
	uint64_t handled_zones;
};

struct nvme_probe_ctx {
	size_t count;
	struct spdk_nvme_transport_id trids[NVME_MAX_CONTROLLERS];
	struct spdk_nvme_host_id hostids[NVME_MAX_CONTROLLERS];
	const char *names[NVME_MAX_CONTROLLERS];
	uint32_t prchk_flags[NVME_MAX_CONTROLLERS];
	const char *hostnqn;
};

struct nvme_probe_skip_entry {
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(nvme_probe_skip_entry) tailq;
};
/* All the controllers deleted by users via RPC are skipped by hotplug monitor */
static TAILQ_HEAD(, nvme_probe_skip_entry) g_skipped_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_skipped_nvme_ctrlrs);

static struct spdk_bdev_nvme_opts g_opts = {
	.action_on_timeout = SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE,
	.timeout_us = 0,
	.timeout_admin_us = 0,
	.keep_alive_timeout_ms = SPDK_BDEV_NVME_DEFAULT_KEEP_ALIVE_TIMEOUT_IN_MS,
	.retry_count = 4,
	.arbitration_burst = 0,
	.low_priority_weight = 0,
	.medium_priority_weight = 0,
	.high_priority_weight = 0,
	.nvme_adminq_poll_period_us = 10000ULL,
	.nvme_ioq_poll_period_us = 0,
	.io_queue_requests = 0,
	.delay_cmd_submit = SPDK_BDEV_NVME_DEFAULT_DELAY_CMD_SUBMIT,
};

#define NVME_HOTPLUG_POLL_PERIOD_MAX		10000000ULL
#define NVME_HOTPLUG_POLL_PERIOD_DEFAULT	100000ULL

static int g_hot_insert_nvme_controller_index = 0;
static uint64_t g_nvme_hotplug_poll_period_us = NVME_HOTPLUG_POLL_PERIOD_DEFAULT;
static bool g_nvme_hotplug_enabled = false;
static struct spdk_thread *g_bdev_nvme_init_thread;
static struct spdk_poller *g_hotplug_poller;
static struct spdk_poller *g_hotplug_probe_poller;
static struct spdk_nvme_probe_ctx *g_hotplug_probe_ctx;

static void nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr,
		struct nvme_async_probe_ctx *ctx);
static void nvme_ctrlr_populate_namespaces_done(struct nvme_ctrlr *nvme_ctrlr,
		struct nvme_async_probe_ctx *ctx);
static int bdev_nvme_library_init(void);
static void bdev_nvme_library_fini(void);
static int bdev_nvme_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts);
static int bdev_nvme_no_pi_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba);
static int bdev_nvme_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts);
static int bdev_nvme_zone_appendv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct iovec *iov, int iovcnt, void *md, uint64_t lba_count,
		uint64_t zslba, uint32_t flags);
static int bdev_nvme_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		uint32_t flags);
static int bdev_nvme_comparev_and_writev(struct spdk_nvme_ns *ns,
		struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio, struct iovec *cmp_iov, int cmp_iovcnt, struct iovec *write_iov,
		int write_iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		uint32_t flags);
static int bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio, uint64_t zone_id, uint32_t num_zones,
		struct spdk_bdev_zone_info *info);
static int bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio, uint64_t zone_id,
		enum spdk_bdev_zone_action action);
static int bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch,
		struct nvme_bdev_io *bio,
		struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
static int bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch,
		struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
static int bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio);
static int bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove);
static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);

struct spdk_nvme_qpair *
bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch)
{
	struct nvme_ctrlr_channel *ctrlr_ch;

	assert(ctrlr_io_ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ctrlr_io_ch);

	return ctrlr_ch->qpair;
}

static int
bdev_nvme_get_ctx_size(void)
{
	return sizeof(struct nvme_bdev_io);
}

static struct spdk_bdev_module nvme_if = {
	.name = "nvme",
	.async_fini = true,
	.module_init = bdev_nvme_library_init,
	.module_fini = bdev_nvme_library_fini,
	.config_json = bdev_nvme_config_json,
	.get_ctx_size = bdev_nvme_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(nvme, &nvme_if)

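/* Resolve the namespace and qpair for an I/O on this bdev channel. Returns
 * false while the qpair is torn down, i.e. during a controller reset.
 */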
static inline bool
bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch,
		struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
{
	if (spdk_unlikely(nbdev_ch->ctrlr_ch->qpair == NULL)) {
		/* The device is currently resetting. */
		return false;
	}

	*_ns = nbdev_ch->nvme_ns->ns;
	*_qpair = nbdev_ch->ctrlr_ch->qpair;
	return true;
}

static inline bool
bdev_nvme_find_admin_path(struct nvme_bdev_channel *nbdev_ch,
		struct nvme_ctrlr **_nvme_ctrlr)
{
	*_nvme_ctrlr = nbdev_ch->ctrlr_ch->ctrlr;
	return true;
}

static inline void
bdev_nvme_io_complete_nvme_status(struct nvme_bdev_io *bio,
		const struct spdk_nvme_cpl *cpl)
{
	spdk_bdev_io_complete_nvme_status(spdk_bdev_io_from_ctx(bio), cpl->cdw0,
			cpl->status.sct, cpl->status.sc);
}

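/* Complete a bdev I/O, mapping 0/-ENOMEM/other errno values to the
 * corresponding bdev I/O statuses.
 */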
static inline void
bdev_nvme_io_complete(struct nvme_bdev_io *bio, int rc)
{
	enum spdk_bdev_io_status io_status;

	if (rc == 0) {
		io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (rc == -ENOMEM) {
		io_status = SPDK_BDEV_IO_STATUS_NOMEM;
	} else {
		io_status = SPDK_BDEV_IO_STATUS_FAILED;
	}

	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), io_status);
}

static void
bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "qpair %p is disconnected, attempting reconnect.\n", qpair);
	/*
	 * Currently, just try to reconnect indefinitely. If we are doing a reset, the reset will
	 * reconnect a qpair and we will stop getting a callback for this one.
	 */
	rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);
	if (rc != 0) {
		SPDK_DEBUGLOG(bdev_nvme, "Failed to reconnect to qpair %p, errno %d\n", qpair, -rc);
	}
}

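/* Poll-group poller that drives completions for all I/O qpairs in the group.
 * The spin-time bookkeeping is only enabled when VTune support is compiled in.
 */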
static int
bdev_nvme_poll(void *arg)
{
	struct nvme_poll_group *group = arg;
	int64_t num_completions;

	if (group->collect_spin_stat && group->start_ticks == 0) {
		group->start_ticks = spdk_get_ticks();
	}

	num_completions = spdk_nvme_poll_group_process_completions(group->group, 0,
			bdev_nvme_disconnected_qpair_cb);
	if (group->collect_spin_stat) {
		if (num_completions > 0) {
			if (group->end_ticks != 0) {
				group->spin_ticks += (group->end_ticks - group->start_ticks);
				group->end_ticks = 0;
			}
			group->start_ticks = 0;
		} else {
			group->end_ticks = spdk_get_ticks();
		}
	}

	return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

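/* Admin queue poller. A processing error triggers a controller failover attempt. */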
static int
bdev_nvme_poll_adminq(void *arg)
{
	int32_t rc;
	struct nvme_ctrlr *nvme_ctrlr = arg;

	assert(nvme_ctrlr != NULL);

	rc = spdk_nvme_ctrlr_process_admin_completions(nvme_ctrlr->ctrlr);
	if (rc < 0) {
		bdev_nvme_failover(nvme_ctrlr, false);
	}

	return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
}

static void
_bdev_nvme_unregister_dev_cb(void *io_device)
{
	struct nvme_bdev *nvme_disk = io_device;

	free(nvme_disk->disk.name);
	free(nvme_disk);
}

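/* bdev destruct callback. Detaches the bdev from its namespace and releases
 * the controller reference if the namespace is no longer populated.
 */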
static int
bdev_nvme_destruct(void *ctx)
{
	struct nvme_bdev *nvme_disk = ctx;
	struct nvme_ns *nvme_ns = nvme_disk->nvme_ns;

	pthread_mutex_lock(&nvme_ns->ctrlr->mutex);

	nvme_ns->bdev = NULL;

	if (!nvme_ns->populated) {
		pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);

		nvme_ctrlr_release(nvme_ns->ctrlr);
	} else {
		pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
	}

	spdk_io_device_unregister(nvme_disk, _bdev_nvme_unregister_dev_cb);

	return 0;
}

static int
bdev_nvme_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio, uint64_t offset, uint64_t nbytes)
{
	bdev_nvme_io_complete(bio, 0);

	return 0;
}

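/* Allocate an I/O qpair for this controller channel, add it to the channel's
 * poll group, and then connect it.
 */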
static int
bdev_nvme_create_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
{
	struct spdk_nvme_ctrlr *ctrlr = ctrlr_ch->ctrlr->ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	int rc;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.delay_cmd_submit = g_opts.delay_cmd_submit;
	opts.create_only = true;
	opts.async_mode = true;
	opts.io_queue_requests = spdk_max(g_opts.io_queue_requests, opts.io_queue_requests);
	g_opts.io_queue_requests = opts.io_queue_requests;

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	if (qpair == NULL) {
		return -1;
	}

	assert(ctrlr_ch->group != NULL);

	rc = spdk_nvme_poll_group_add(ctrlr_ch->group->group, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
		goto err;
	}

	rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to connect I/O qpair.\n");
		goto err;
	}

	ctrlr_ch->qpair = qpair;

	return 0;

err:
	spdk_nvme_ctrlr_free_io_qpair(qpair);

	return rc;
}

static void
bdev_nvme_destroy_qpair(struct nvme_ctrlr_channel *ctrlr_ch)
{
	if (ctrlr_ch->qpair != NULL) {
		spdk_nvme_ctrlr_free_io_qpair(ctrlr_ch->qpair);
		ctrlr_ch->qpair = NULL;
	}
}

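/* If a destruct request was deferred until an in-flight reset finished,
 * schedule the controller unregistration now.
 */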
static void
_bdev_nvme_check_pending_destruct(struct nvme_ctrlr *nvme_ctrlr)
{
	pthread_mutex_lock(&nvme_ctrlr->mutex);
	if (nvme_ctrlr->destruct_after_reset) {
		assert(nvme_ctrlr->ref == 0 && nvme_ctrlr->destruct);
		pthread_mutex_unlock(&nvme_ctrlr->mutex);

		spdk_thread_send_msg(nvme_ctrlr->thread, nvme_ctrlr_unregister,
				nvme_ctrlr);
	} else {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
	}
}

static void
bdev_nvme_check_pending_destruct(struct spdk_io_channel_iter *i, int status)
{
	struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_io_device(i);

	_bdev_nvme_check_pending_destruct(nvme_ctrlr);
}

static void
_bdev_nvme_complete_pending_resets(struct nvme_ctrlr_channel *ctrlr_ch,
		enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io;

	while (!TAILQ_EMPTY(&ctrlr_ch->pending_resets)) {
		bdev_io = TAILQ_FIRST(&ctrlr_ch->pending_resets);
		TAILQ_REMOVE(&ctrlr_ch->pending_resets, bdev_io, module_link);
		spdk_bdev_io_complete(bdev_io, status);
	}
}

static void
bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);

	_bdev_nvme_complete_pending_resets(ctrlr_ch, SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_for_each_channel_continue(i, 0);
}

static void
bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);

	_bdev_nvme_complete_pending_resets(ctrlr_ch, SPDK_BDEV_IO_STATUS_FAILED);

	spdk_for_each_channel_continue(i, 0);
}

static void
bdev_nvme_reset_complete(struct nvme_ctrlr *nvme_ctrlr, int rc)
{
	struct nvme_ctrlr_trid *curr_trid;
	bdev_nvme_reset_cb reset_cb_fn = nvme_ctrlr->reset_cb_fn;
	void *reset_cb_arg = nvme_ctrlr->reset_cb_arg;

	nvme_ctrlr->reset_cb_fn = NULL;
	nvme_ctrlr->reset_cb_arg = NULL;

	if (rc) {
		SPDK_ERRLOG("Resetting controller failed.\n");
	} else {
		SPDK_NOTICELOG("Resetting controller successful.\n");
	}

	pthread_mutex_lock(&nvme_ctrlr->mutex);
	nvme_ctrlr->resetting = false;
	nvme_ctrlr->failover_in_progress = false;

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	assert(curr_trid != NULL);
	assert(&curr_trid->trid == nvme_ctrlr->connected_trid);

	curr_trid->is_failed = rc != 0 ? true : false;

	if (nvme_ctrlr->ref == 0 && nvme_ctrlr->destruct) {
		/* Destruct ctrlr after clearing pending resets. */
		nvme_ctrlr->destruct_after_reset = true;
	}

	pthread_mutex_unlock(&nvme_ctrlr->mutex);

	if (reset_cb_fn) {
		reset_cb_fn(reset_cb_arg, rc);
	}

	/* Make sure we clear any pending resets before returning. */
	spdk_for_each_channel(nvme_ctrlr,
			rc == 0 ? bdev_nvme_complete_pending_resets :
			bdev_nvme_abort_pending_resets,
			NULL,
			bdev_nvme_check_pending_destruct);
}

static void
bdev_nvme_reset_create_qpairs_done(struct spdk_io_channel_iter *i, int status)
{
	struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_io_device(i);

	bdev_nvme_reset_complete(nvme_ctrlr, status);
}

static void
bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
	int rc;

	rc = bdev_nvme_create_qpair(ctrlr_ch);

	spdk_for_each_channel_continue(i, rc);
}

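/* Poller that advances the asynchronous controller reset. Once the reset
 * completes, the I/O qpairs are recreated on every channel.
 */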
static int
bdev_nvme_ctrlr_reset_poll(void *arg)
{
	struct nvme_ctrlr *nvme_ctrlr = arg;
	int rc;

	rc = spdk_nvme_ctrlr_reset_poll_async(nvme_ctrlr->reset_ctx);
	if (rc == -EAGAIN) {
		return SPDK_POLLER_BUSY;
	}

	spdk_poller_unregister(&nvme_ctrlr->reset_poller);
	if (rc == 0) {
		/* Recreate all of the I/O queue pairs */
		spdk_for_each_channel(nvme_ctrlr,
				bdev_nvme_reset_create_qpair,
				NULL,
				bdev_nvme_reset_create_qpairs_done);
	} else {
		bdev_nvme_reset_complete(nvme_ctrlr, rc);
	}
	return SPDK_POLLER_BUSY;
}

static void
bdev_nvme_reset_ctrlr(struct spdk_io_channel_iter *i, int status)
{
	struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_io_device(i);
	int rc;

	if (status) {
		rc = status;
		goto err;
	}

	rc = spdk_nvme_ctrlr_reset_async(nvme_ctrlr->ctrlr, &nvme_ctrlr->reset_ctx);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create controller reset context\n");
		goto err;
	}
	assert(nvme_ctrlr->reset_poller == NULL);
	nvme_ctrlr->reset_poller = SPDK_POLLER_REGISTER(bdev_nvme_ctrlr_reset_poll,
				nvme_ctrlr, 0);

	return;

err:
	bdev_nvme_reset_complete(nvme_ctrlr, rc);
}

static void
bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);

	bdev_nvme_destroy_qpair(ctrlr_ch);
	spdk_for_each_channel_continue(i, 0);
}

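/* Start a controller reset: destroy the qpair on every channel first, then
 * reset the controller itself from the bdev_nvme_reset_ctrlr completion callback.
 */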
static int
bdev_nvme_reset(struct nvme_ctrlr *nvme_ctrlr)
{
	pthread_mutex_lock(&nvme_ctrlr->mutex);
	if (nvme_ctrlr->destruct) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		return -EBUSY;
	}

	if (nvme_ctrlr->resetting) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		SPDK_NOTICELOG("Unable to perform reset, already in progress.\n");
		return -EAGAIN;
	}

	nvme_ctrlr->resetting = true;
	pthread_mutex_unlock(&nvme_ctrlr->mutex);

	/* First, delete all NVMe I/O queue pairs. */
	spdk_for_each_channel(nvme_ctrlr,
			bdev_nvme_reset_destroy_qpair,
			NULL,
			bdev_nvme_reset_ctrlr);

	return 0;
}

int
bdev_nvme_reset_rpc(struct nvme_ctrlr *nvme_ctrlr, bdev_nvme_reset_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev_nvme_reset(nvme_ctrlr);
	if (rc == 0) {
		nvme_ctrlr->reset_cb_fn = cb_fn;
		nvme_ctrlr->reset_cb_arg = cb_arg;
	}
	return rc;
}

static void
bdev_nvme_reset_io_complete(void *cb_arg, int rc)
{
	struct nvme_bdev_io *bio = cb_arg;

	bdev_nvme_io_complete(bio, rc);
}

static int
bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
{
	struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
	struct spdk_bdev_io *bdev_io;
	int rc;

	rc = bdev_nvme_reset(ctrlr_ch->ctrlr);
	if (rc == 0) {
		assert(ctrlr_ch->ctrlr->reset_cb_fn == NULL);
		assert(ctrlr_ch->ctrlr->reset_cb_arg == NULL);
		ctrlr_ch->ctrlr->reset_cb_fn = bdev_nvme_reset_io_complete;
		ctrlr_ch->ctrlr->reset_cb_arg = bio;
	} else if (rc == -EAGAIN) {
		/*
		 * A reset is queued only if it comes from the app framework. This is on purpose so
		 * that we don't interfere with the app framework reset strategy; i.e., we are
		 * deferring to the upper level. If it is in the middle of a reset, we won't try to
		 * schedule another one.
		 */
		bdev_io = spdk_bdev_io_from_ctx(bio);
		TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bdev_io, module_link);
	} else {
		return rc;
	}

	return 0;
}

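/* Mark the controller as resetting and, if an alternate trid is available,
 * switch the connection over to it.
 */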
static int
bdev_nvme_failover_start(struct nvme_ctrlr *nvme_ctrlr, bool remove)
{
	struct nvme_ctrlr_trid *curr_trid = NULL, *next_trid = NULL;
	int rc;

	pthread_mutex_lock(&nvme_ctrlr->mutex);
	if (nvme_ctrlr->destruct) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		/* Don't bother resetting if the controller is in the process of being destructed. */
		return -EBUSY;
	}

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	assert(curr_trid);
	assert(&curr_trid->trid == nvme_ctrlr->connected_trid);
	next_trid = TAILQ_NEXT(curr_trid, link);

	if (nvme_ctrlr->resetting) {
		if (next_trid && !nvme_ctrlr->failover_in_progress) {
			rc = -EAGAIN;
		} else {
			rc = -EBUSY;
		}
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		SPDK_NOTICELOG("Unable to perform reset, already in progress.\n");
		return rc;
	}

	nvme_ctrlr->resetting = true;
	curr_trid->is_failed = true;

	if (next_trid) {
		assert(curr_trid->trid.trtype != SPDK_NVME_TRANSPORT_PCIE);

		SPDK_NOTICELOG("Start failover from %s:%s to %s:%s\n", curr_trid->trid.traddr,
				curr_trid->trid.trsvcid, next_trid->trid.traddr, next_trid->trid.trsvcid);

		nvme_ctrlr->failover_in_progress = true;
		spdk_nvme_ctrlr_fail(nvme_ctrlr->ctrlr);
		nvme_ctrlr->connected_trid = &next_trid->trid;
		rc = spdk_nvme_ctrlr_set_trid(nvme_ctrlr->ctrlr, &next_trid->trid);
		assert(rc == 0);
		TAILQ_REMOVE(&nvme_ctrlr->trids, curr_trid, link);
		if (!remove) {
			/** Shuffle the old trid to the end of the list and use the new one.
			 * Allows for round robin through multiple connections.
			 */
			TAILQ_INSERT_TAIL(&nvme_ctrlr->trids, curr_trid, link);
		} else {
			free(curr_trid);
		}
	}

	pthread_mutex_unlock(&nvme_ctrlr->mutex);
	return 0;
}

static int
bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove)
{
	int rc;

	rc = bdev_nvme_failover_start(nvme_ctrlr, remove);
	if (rc == 0) {
		/* First, delete all NVMe I/O queue pairs. */
		spdk_for_each_channel(nvme_ctrlr,
				bdev_nvme_reset_destroy_qpair,
				NULL,
				bdev_nvme_reset_ctrlr);
	} else if (rc != -EBUSY) {
		return rc;
	}

	return 0;
}

static int
bdev_nvme_unmap(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		uint64_t offset_blocks,
		uint64_t num_blocks);

static int
bdev_nvme_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		uint64_t offset_blocks,
		uint64_t num_blocks);

static void
bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		bool success)
{
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int ret;

	if (!success) {
		ret = -EINVAL;
		goto exit;
	}

	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
		ret = -ENXIO;
		goto exit;
	}

	ret = bdev_nvme_readv(ns,
			qpair,
			bio,
			bdev_io->u.bdev.iovs,
			bdev_io->u.bdev.iovcnt,
			bdev_io->u.bdev.md_buf,
			bdev_io->u.bdev.num_blocks,
			bdev_io->u.bdev.offset_blocks,
			bdev->dif_check_flags,
			bdev_io->internal.ext_opts);

exit:
	if (spdk_unlikely(ret != 0)) {
		bdev_nvme_io_complete(bio, ret);
	}
}

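/* Main I/O dispatch: route each bdev I/O type to the matching NVMe operation. */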
static void
bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct nvme_bdev_io *nbdev_io_to_abort;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int rc = 0;

	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
		rc = -ENXIO;
		goto exit;
	}

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (bdev_io->u.bdev.iovs && bdev_io->u.bdev.iovs[0].iov_base) {
			rc = bdev_nvme_readv(ns,
					qpair,
					nbdev_io,
					bdev_io->u.bdev.iovs,
					bdev_io->u.bdev.iovcnt,
					bdev_io->u.bdev.md_buf,
					bdev_io->u.bdev.num_blocks,
					bdev_io->u.bdev.offset_blocks,
					bdev->dif_check_flags,
					bdev_io->internal.ext_opts);
		} else {
			spdk_bdev_io_get_buf(bdev_io, bdev_nvme_get_buf_cb,
					bdev_io->u.bdev.num_blocks * bdev->blocklen);
			rc = 0;
		}
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		rc = bdev_nvme_writev(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.md_buf,
				bdev_io->u.bdev.num_blocks,
				bdev_io->u.bdev.offset_blocks,
				bdev->dif_check_flags,
				bdev_io->internal.ext_opts);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE:
		rc = bdev_nvme_comparev(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.md_buf,
				bdev_io->u.bdev.num_blocks,
				bdev_io->u.bdev.offset_blocks,
				bdev->dif_check_flags);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		rc = bdev_nvme_comparev_and_writev(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.fused_iovs,
				bdev_io->u.bdev.fused_iovcnt,
				bdev_io->u.bdev.md_buf,
				bdev_io->u.bdev.num_blocks,
				bdev_io->u.bdev.offset_blocks,
				bdev->dif_check_flags);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = bdev_nvme_unmap(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.offset_blocks,
				bdev_io->u.bdev.num_blocks);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = bdev_nvme_write_zeroes(ns, qpair,
				nbdev_io,
				bdev_io->u.bdev.offset_blocks,
				bdev_io->u.bdev.num_blocks);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = bdev_nvme_reset_io(nbdev_ch, nbdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = bdev_nvme_flush(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.offset_blocks,
				bdev_io->u.bdev.num_blocks);
		break;
	case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
		rc = bdev_nvme_zone_appendv(ns,
				qpair,
				nbdev_io,
				bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.md_buf,
				bdev_io->u.bdev.num_blocks,
				bdev_io->u.bdev.offset_blocks,
				bdev->dif_check_flags);
		break;
	case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
		rc = bdev_nvme_get_zone_info(ns,
				qpair,
				nbdev_io,
				bdev_io->u.zone_mgmt.zone_id,
				bdev_io->u.zone_mgmt.num_zones,
				bdev_io->u.zone_mgmt.buf);
		break;
	case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT:
		rc = bdev_nvme_zone_management(ns,
				qpair,
				nbdev_io,
				bdev_io->u.zone_mgmt.zone_id,
				bdev_io->u.zone_mgmt.zone_action);
		break;
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
		rc = bdev_nvme_admin_passthru(nbdev_ch,
				nbdev_io,
				&bdev_io->u.nvme_passthru.cmd,
				bdev_io->u.nvme_passthru.buf,
				bdev_io->u.nvme_passthru.nbytes);
		break;
	case SPDK_BDEV_IO_TYPE_NVME_IO:
		rc = bdev_nvme_io_passthru(ns,
				qpair,
				nbdev_io,
				&bdev_io->u.nvme_passthru.cmd,
				bdev_io->u.nvme_passthru.buf,
				bdev_io->u.nvme_passthru.nbytes);
		break;
	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		rc = bdev_nvme_io_passthru_md(ns,
				qpair,
				nbdev_io,
				&bdev_io->u.nvme_passthru.cmd,
				bdev_io->u.nvme_passthru.buf,
				bdev_io->u.nvme_passthru.nbytes,
				bdev_io->u.nvme_passthru.md_buf,
				bdev_io->u.nvme_passthru.md_len);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		nbdev_io_to_abort = (struct nvme_bdev_io *)bdev_io->u.abort.bio_to_abort->driver_ctx;
		rc = bdev_nvme_abort(nbdev_ch,
				nbdev_io,
				nbdev_io_to_abort);
		break;
	default:
		rc = -EINVAL;
		break;
	}

exit:
	if (spdk_unlikely(rc != 0)) {
		bdev_nvme_io_complete(nbdev_io, rc);
	}
}

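/* Report which bdev I/O types this namespace/controller pair can service. */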
static bool
bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev *nbdev = ctx;
	struct nvme_ns *nvme_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ctrlr *ctrlr;
	const struct spdk_nvme_ctrlr_data *cdata;

	nvme_ns = nbdev->nvme_ns;
	assert(nvme_ns != NULL);
	ns = nvme_ns->ns;
	ctrlr = spdk_nvme_ns_get_ctrlr(ns);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
	case SPDK_BDEV_IO_TYPE_NVME_IO:
	case SPDK_BDEV_IO_TYPE_ABORT:
		return true;

	case SPDK_BDEV_IO_TYPE_COMPARE:
		return spdk_nvme_ns_supports_compare(ns);

	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		return spdk_nvme_ns_get_md_size(ns) ? true : false;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
		return cdata->oncs.dsm;

	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		cdata = spdk_nvme_ctrlr_get_data(ctrlr);
		return cdata->oncs.write_zeroes;

	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		if (spdk_nvme_ctrlr_get_flags(ctrlr) &
				SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED) {
			return true;
		}
		return false;

	case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
	case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT:
		return spdk_nvme_ns_get_csi(ns) == SPDK_NVME_CSI_ZNS;

	case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
		return spdk_nvme_ns_get_csi(ns) == SPDK_NVME_CSI_ZNS &&
				spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;

	default:
		return false;
	}
}

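/* Controller channel create callback: join a poll group and create the I/O qpair. */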
static int
bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf)
{
	struct nvme_ctrlr *nvme_ctrlr = io_device;
	struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf;
	struct spdk_io_channel *pg_ch;
	int rc;

	pg_ch = spdk_get_io_channel(&g_nvme_ctrlrs);
	if (!pg_ch) {
		return -1;
	}

	ctrlr_ch->group = spdk_io_channel_get_ctx(pg_ch);

#ifdef SPDK_CONFIG_VTUNE
	ctrlr_ch->group->collect_spin_stat = true;
#else
	ctrlr_ch->group->collect_spin_stat = false;
#endif

	TAILQ_INIT(&ctrlr_ch->pending_resets);

	ctrlr_ch->ctrlr = nvme_ctrlr;

	rc = bdev_nvme_create_qpair(ctrlr_ch);
	if (rc != 0) {
		goto err_qpair;
	}

	return 0;

err_qpair:
	spdk_put_io_channel(pg_ch);

	return rc;
}

|
|
|
|
|
|
|
|
static void
|
2021-07-07 01:02:14 +00:00
|
|
|
bdev_nvme_destroy_ctrlr_channel_cb(void *io_device, void *ctx_buf)
|
2016-09-16 19:53:32 +00:00
|
|
|
{
|
2021-07-07 01:02:14 +00:00
|
|
|
struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf;
|
2020-02-07 00:20:35 +00:00
|
|
|
|
2021-07-07 01:02:14 +00:00
|
|
|
assert(ctrlr_ch->group != NULL);
|
2016-09-16 19:53:32 +00:00
|
|
|
|
2021-07-07 01:02:14 +00:00
|
|
|
bdev_nvme_destroy_qpair(ctrlr_ch);
|
2020-02-07 00:20:35 +00:00
|
|
|
|
2021-07-07 01:02:14 +00:00
|
|
|
spdk_put_io_channel(spdk_io_channel_from_ctx(ctrlr_ch->group));
|
2020-02-07 00:20:35 +00:00
|
|
|
}
|
|
|
|
|
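/*
 * These two callbacks are wired up via spdk_io_device_register() in
 * nvme_ctrlr_create_done(). A hedged usage sketch (illustrative only, not
 * part of this module): any SPDK thread can then materialize its own
 * per-thread channel, which runs the create callback above on first use.
 *
 *	struct spdk_io_channel *ch = spdk_get_io_channel(nvme_ctrlr);
 *	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
 *	...
 *	spdk_put_io_channel(ch);	// last put runs the destroy callback
 */
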
static void
bdev_nvme_submit_accel_crc32c(void *ctx, uint32_t *dst, struct iovec *iov,
			      uint32_t iov_cnt, uint32_t seed,
			      spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
{
	struct nvme_poll_group *group = ctx;
	int rc;

	assert(group->accel_channel != NULL);
	assert(cb_fn != NULL);

	rc = spdk_accel_submit_crc32cv(group->accel_channel, dst, iov, iov_cnt, seed, cb_fn, cb_arg);
	if (rc) {
		/* For these two error codes, spdk_accel_submit_crc32cv does not call
		 * the user's cb_fn, so complete the operation here.
		 */
		if (rc == -ENOMEM || rc == -EINVAL) {
			cb_fn(cb_arg, rc);
		}
		SPDK_ERRLOG("Cannot complete the accelerated crc32c operation with iov=%p\n", iov);
	}
}

static struct spdk_nvme_accel_fn_table g_bdev_nvme_accel_fn_table = {
	.table_size		= sizeof(struct spdk_nvme_accel_fn_table),
	.submit_accel_crc32c	= bdev_nvme_submit_accel_crc32c,
};

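/*
 * This table is passed to spdk_nvme_poll_group_create() below so the NVMe
 * transport (e.g. NVMe/TCP data-digest offload) can hand CRC-32C work to the
 * accel engine. A hedged sketch of a transport-side call through the table
 * (field and variable names here are hypothetical, illustrative only):
 *
 *	table->submit_accel_crc32c(group_ctx, &crc, iovs, iovcnt, 0,
 *				   digest_done_cb, req);
 */
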
static int
bdev_nvme_create_poll_group_cb(void *io_device, void *ctx_buf)
{
	struct nvme_poll_group *group = ctx_buf;

	group->group = spdk_nvme_poll_group_create(group, &g_bdev_nvme_accel_fn_table);
	if (group->group == NULL) {
		return -1;
	}

	group->accel_channel = spdk_accel_engine_get_io_channel();
	if (!group->accel_channel) {
		spdk_nvme_poll_group_destroy(group->group);
		SPDK_ERRLOG("Cannot get the accel_channel for bdev nvme polling group=%p\n",
			    group);
		return -1;
	}

	group->poller = SPDK_POLLER_REGISTER(bdev_nvme_poll, group, g_opts.nvme_ioq_poll_period_us);

	if (group->poller == NULL) {
		spdk_put_io_channel(group->accel_channel);
		spdk_nvme_poll_group_destroy(group->group);
		return -1;
	}

	return 0;
}

static void
bdev_nvme_destroy_poll_group_cb(void *io_device, void *ctx_buf)
{
	struct nvme_poll_group *group = ctx_buf;

	if (group->accel_channel) {
		spdk_put_io_channel(group->accel_channel);
	}

	spdk_poller_unregister(&group->poller);
	if (spdk_nvme_poll_group_destroy(group->group)) {
		SPDK_ERRLOG("Unable to destroy a poll group for the NVMe bdev module.\n");
		assert(false);
	}
}

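/*
 * Poll groups are per-thread singletons: bdev_nvme_create_ctrlr_channel_cb()
 * above calls spdk_get_io_channel(&g_nvme_ctrlrs), so the first NVMe channel
 * created on a thread runs bdev_nvme_create_poll_group_cb(), giving that
 * thread one nvme_poll_group, one accel channel, and one poller that reaps
 * completions for every qpair owned by the thread.
 */
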
static struct spdk_io_channel *
bdev_nvme_get_io_channel(void *ctx)
{
	struct nvme_bdev *nvme_bdev = ctx;

	return spdk_get_io_channel(nvme_bdev);
}

static void *
bdev_nvme_get_module_ctx(void *ctx)
{
	struct nvme_bdev *nvme_bdev = ctx;

	return bdev_nvme_get_ctrlr(&nvme_bdev->disk);
}

static const char *
_nvme_ana_state_str(enum spdk_nvme_ana_state ana_state)
{
	switch (ana_state) {
	case SPDK_NVME_ANA_OPTIMIZED_STATE:
		return "optimized";
	case SPDK_NVME_ANA_NON_OPTIMIZED_STATE:
		return "non_optimized";
	case SPDK_NVME_ANA_INACCESSIBLE_STATE:
		return "inaccessible";
	case SPDK_NVME_ANA_PERSISTENT_LOSS_STATE:
		return "persistent_loss";
	case SPDK_NVME_ANA_CHANGE_STATE:
		return "change";
	default:
		return NULL;
	}
}

static int
bdev_nvme_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct nvme_bdev *nbdev = ctx;
	struct spdk_memory_domain *domain;

	domain = spdk_nvme_ctrlr_get_memory_domain(nbdev->nvme_ns->ctrlr->ctrlr);

	if (domain) {
		if (array_size > 0 && domains) {
			domains[0] = domain;
		}
		return 1;
	}

	return 0;
}

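/*
 * The return value is the number of available domains, whether or not the
 * caller provided room for them. A hedged sketch of the expected two-step
 * calling convention (assuming the generic accessor
 * spdk_bdev_get_memory_domains() routes to this callback):
 *
 *	int n = spdk_bdev_get_memory_domains(bdev, NULL, 0);	// query count
 *	// ...allocate an array of n pointers, then fetch them...
 *	spdk_bdev_get_memory_domains(bdev, domains, n);
 */
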
static int
bdev_nvme_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct nvme_bdev *nvme_bdev = ctx;
	struct nvme_ns *nvme_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ctrlr *ctrlr;
	const struct spdk_nvme_ctrlr_data *cdata;
	const struct spdk_nvme_transport_id *trid;
	union spdk_nvme_vs_register vs;
	union spdk_nvme_csts_register csts;
	char buf[128];

	nvme_ns = nvme_bdev->nvme_ns;
	assert(nvme_ns != NULL);
	ns = nvme_ns->ns;
	ctrlr = spdk_nvme_ns_get_ctrlr(ns);

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);
	trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);
	vs = spdk_nvme_ctrlr_get_regs_vs(ctrlr);
	csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr);

	spdk_json_write_named_object_begin(w, "nvme");

	if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		spdk_json_write_named_string(w, "pci_address", trid->traddr);
	}

	spdk_json_write_named_object_begin(w, "trid");

	nvme_bdev_dump_trid_json(trid, w);

	spdk_json_write_object_end(w);

#ifdef SPDK_CONFIG_NVME_CUSE
	size_t cuse_name_size = 128;
	char cuse_name[cuse_name_size];

	int rc = spdk_nvme_cuse_get_ns_name(ctrlr, spdk_nvme_ns_get_id(ns),
					    cuse_name, &cuse_name_size);
	if (rc == 0) {
		spdk_json_write_named_string(w, "cuse_device", cuse_name);
	}
#endif

	spdk_json_write_named_object_begin(w, "ctrlr_data");

	spdk_json_write_named_string_fmt(w, "vendor_id", "0x%04x", cdata->vid);

	snprintf(buf, sizeof(cdata->mn) + 1, "%s", cdata->mn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "model_number", buf);

	snprintf(buf, sizeof(cdata->sn) + 1, "%s", cdata->sn);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "serial_number", buf);

	snprintf(buf, sizeof(cdata->fr) + 1, "%s", cdata->fr);
	spdk_str_trim(buf);
	spdk_json_write_named_string(w, "firmware_revision", buf);

	if (cdata->subnqn[0] != '\0') {
		spdk_json_write_named_string(w, "subnqn", cdata->subnqn);
	}

	spdk_json_write_named_object_begin(w, "oacs");

	spdk_json_write_named_uint32(w, "security", cdata->oacs.security);
	spdk_json_write_named_uint32(w, "format", cdata->oacs.format);
	spdk_json_write_named_uint32(w, "firmware", cdata->oacs.firmware);
	spdk_json_write_named_uint32(w, "ns_manage", cdata->oacs.ns_manage);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	spdk_json_write_named_object_begin(w, "vs");

	spdk_json_write_name(w, "nvme_version");
	if (vs.bits.ter) {
		spdk_json_write_string_fmt(w, "%u.%u.%u", vs.bits.mjr, vs.bits.mnr, vs.bits.ter);
	} else {
		spdk_json_write_string_fmt(w, "%u.%u", vs.bits.mjr, vs.bits.mnr);
	}

	spdk_json_write_object_end(w);

	spdk_json_write_named_object_begin(w, "csts");

	spdk_json_write_named_uint32(w, "rdy", csts.bits.rdy);
	spdk_json_write_named_uint32(w, "cfs", csts.bits.cfs);

	spdk_json_write_object_end(w);

	spdk_json_write_named_object_begin(w, "ns_data");

	spdk_json_write_named_uint32(w, "id", spdk_nvme_ns_get_id(ns));

	if (cdata->cmic.ana_reporting) {
		spdk_json_write_named_string(w, "ana_state",
					     _nvme_ana_state_str(nvme_ns->ana_state));
	}

	spdk_json_write_object_end(w);

	if (cdata->oacs.security) {
		spdk_json_write_named_object_begin(w, "security");

		spdk_json_write_named_bool(w, "opal", nvme_bdev->opal);

		spdk_json_write_object_end(w);
	}

	spdk_json_write_object_end(w);

	return 0;
}

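/*
 * Illustrative (not verbatim) shape of the JSON written above, as seen in
 * e.g. `rpc.py bdev_get_bdevs` output; all values are made up:
 *
 *	"nvme": {
 *	  "pci_address": "0000:5e:00.0",
 *	  "trid": { ... },
 *	  "ctrlr_data": { "vendor_id": "0x8086", "model_number": "...",
 *	                  "oacs": { "security": 0, "format": 1, ... } },
 *	  "vs": { "nvme_version": "1.4" },
 *	  "csts": { "rdy": 1, "cfs": 0 },
 *	  "ns_data": { "id": 1, "ana_state": "optimized" }
 *	}
 */
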
static void
bdev_nvme_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

static uint64_t
bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
	struct nvme_poll_group *group = ctrlr_ch->group;
	uint64_t spin_time;

	if (!group || !group->collect_spin_stat) {
		return 0;
	}

	if (group->end_ticks != 0) {
		group->spin_ticks += (group->end_ticks - group->start_ticks);
		group->end_ticks = 0;
	}

	spin_time = (group->spin_ticks * 1000000ULL) / spdk_get_ticks_hz();
	group->start_ticks = 0;
	group->spin_ticks = 0;

	return spin_time;
}

static const struct spdk_bdev_fn_table nvmelib_fn_table = {
	.destruct		= bdev_nvme_destruct,
	.submit_request		= bdev_nvme_submit_request,
	.io_type_supported	= bdev_nvme_io_type_supported,
	.get_io_channel		= bdev_nvme_get_io_channel,
	.dump_info_json		= bdev_nvme_dump_info_json,
	.write_config_json	= bdev_nvme_write_config_json,
	.get_spin_time		= bdev_nvme_get_spin_time,
	.get_module_ctx		= bdev_nvme_get_module_ctx,
	.get_memory_domains	= bdev_nvme_get_memory_domains,
};

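/*
 * The bdev layer dispatches through this table. For example, a consumer can
 * check whether UNMAP is usable before issuing it (hedged sketch; `bdev` is
 * obtained elsewhere):
 *
 *	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
 *		// routed to bdev_nvme_io_type_supported(), which checks
 *		// cdata->oncs.dsm as shown earlier in this file
 *	}
 */
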
typedef int (*bdev_nvme_parse_ana_log_page_cb)(
	const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg);

static int
bdev_nvme_parse_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
			     bdev_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_ana_group_descriptor *copied_desc;
	uint8_t *orig_desc;
	uint32_t i, desc_size, copy_len;
	int rc = 0;

	if (nvme_ctrlr->ana_log_page == NULL) {
		return -EINVAL;
	}

	copied_desc = nvme_ctrlr->copied_ana_desc;

	orig_desc = (uint8_t *)nvme_ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
	copy_len = nvme_ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);

	for (i = 0; i < nvme_ctrlr->ana_log_page->num_ana_group_desc; i++) {
		memcpy(copied_desc, orig_desc, copy_len);

		rc = cb_fn(copied_desc, cb_arg);
		if (rc != 0) {
			break;
		}

		desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
			    copied_desc->num_of_nsid * sizeof(uint32_t);
		orig_desc += desc_size;
		copy_len -= desc_size;
	}

	return rc;
}

static int
nvme_ns_set_ana_state(const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg)
{
	struct nvme_ns *nvme_ns = cb_arg;
	uint32_t i;

	for (i = 0; i < desc->num_of_nsid; i++) {
		if (desc->nsid[i] != spdk_nvme_ns_get_id(nvme_ns->ns)) {
			continue;
		}
		nvme_ns->ana_group_id = desc->ana_group_id;
		nvme_ns->ana_state = desc->ana_state;
		return 1;
	}

	return 0;
}

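/*
 * Worked example of the layout walked by bdev_nvme_parse_ana_log_page(): the
 * log begins with a fixed header (struct spdk_nvme_ana_page) followed by
 * variable-size group descriptors. With the spec's 32-byte descriptor header,
 * a descriptor listing 3 NSIDs occupies 32 + 3 * 4 = 44 bytes, which is why
 * desc_size must be recomputed from num_of_nsid after each descriptor.
 */
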
static int
nvme_disk_create(struct spdk_bdev *disk, const char *base_name,
		 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns,
		 uint32_t prchk_flags, void *ctx)
{
	const struct spdk_uuid *uuid;
	const uint8_t *nguid;
	const struct spdk_nvme_ctrlr_data *cdata;
	const struct spdk_nvme_ns_data *nsdata;
	enum spdk_nvme_csi csi;
	uint32_t atomic_bs, phys_bs, bs;

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);
	csi = spdk_nvme_ns_get_csi(ns);

	switch (csi) {
	case SPDK_NVME_CSI_NVM:
		disk->product_name = "NVMe disk";
		break;
	case SPDK_NVME_CSI_ZNS:
		disk->product_name = "NVMe ZNS disk";
		disk->zoned = true;
		disk->zone_size = spdk_nvme_zns_ns_get_zone_size_sectors(ns);
		disk->max_zone_append_size = spdk_nvme_zns_ctrlr_get_max_zone_append_size(ctrlr) /
					     spdk_nvme_ns_get_extended_sector_size(ns);
		disk->max_open_zones = spdk_nvme_zns_ns_get_max_open_zones(ns);
		disk->max_active_zones = spdk_nvme_zns_ns_get_max_active_zones(ns);
		break;
	default:
		SPDK_ERRLOG("unsupported CSI: %u\n", csi);
		return -ENOTSUP;
	}

	disk->name = spdk_sprintf_alloc("%sn%d", base_name, spdk_nvme_ns_get_id(ns));
	if (!disk->name) {
		return -ENOMEM;
	}

	disk->write_cache = 0;
	if (cdata->vwc.present) {
		/* Enable if the Volatile Write Cache exists */
		disk->write_cache = 1;
	}
	if (cdata->oncs.write_zeroes) {
		disk->max_write_zeroes = UINT16_MAX + 1;
	}
	disk->blocklen = spdk_nvme_ns_get_extended_sector_size(ns);
	disk->blockcnt = spdk_nvme_ns_get_num_sectors(ns);
	disk->optimal_io_boundary = spdk_nvme_ns_get_optimal_io_boundary(ns);

	nguid = spdk_nvme_ns_get_nguid(ns);
	if (!nguid) {
		uuid = spdk_nvme_ns_get_uuid(ns);
		if (uuid) {
			disk->uuid = *uuid;
		}
	} else {
		memcpy(&disk->uuid, nguid, sizeof(disk->uuid));
	}

	nsdata = spdk_nvme_ns_get_data(ns);
	bs = spdk_nvme_ns_get_sector_size(ns);
	atomic_bs = bs;
	phys_bs = bs;
	if (nsdata->nabo == 0) {
		if (nsdata->nsfeat.ns_atomic_write_unit && nsdata->nawupf) {
			atomic_bs = bs * (1 + nsdata->nawupf);
		} else {
			atomic_bs = bs * (1 + cdata->awupf);
		}
	}
	if (nsdata->nsfeat.optperf) {
		phys_bs = bs * (1 + nsdata->npwg);
	}
	disk->phys_blocklen = spdk_min(phys_bs, atomic_bs);

	disk->md_len = spdk_nvme_ns_get_md_size(ns);
	if (disk->md_len != 0) {
		disk->md_interleave = nsdata->flbas.extended;
		disk->dif_type = (enum spdk_dif_type)spdk_nvme_ns_get_pi_type(ns);
		if (disk->dif_type != SPDK_DIF_DISABLE) {
			disk->dif_is_head_of_md = nsdata->dps.md_start;
			disk->dif_check_flags = prchk_flags;
		}
	}

	if (!(spdk_nvme_ctrlr_get_flags(ctrlr) &
	      SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED)) {
		disk->acwu = 0;
	} else if (nsdata->nsfeat.ns_atomic_write_unit) {
		disk->acwu = nsdata->nacwu;
	} else {
		disk->acwu = cdata->acwu;
	}

	disk->ctxt = ctx;
	disk->fn_table = &nvmelib_fn_table;
	disk->module = &nvme_if;

	return 0;
}

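/*
 * Worked example for the atomic/physical block size math above: NAWUPF and
 * NPWG are 0-based counts of logical blocks, hence the "1 +" terms. With a
 * 512-byte sector and nawupf = 7, atomic_bs = 512 * (1 + 7) = 4096; with
 * npwg = 7, phys_bs = 4096 as well, so phys_blocklen = min(4096, 4096) = 4096.
 */
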
static int
nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
{
	struct nvme_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	if (!bdev) {
		SPDK_ERRLOG("bdev calloc() failed\n");
		return -ENOMEM;
	}

	bdev->nvme_ns = nvme_ns;
	bdev->opal = nvme_ctrlr->opal_dev != NULL;

	rc = nvme_disk_create(&bdev->disk, nvme_ctrlr->name, nvme_ctrlr->ctrlr,
			      nvme_ns->ns, nvme_ctrlr->prchk_flags, bdev);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create NVMe disk\n");
		free(bdev);
		return rc;
	}

	spdk_io_device_register(bdev,
				bdev_nvme_create_bdev_channel_cb,
				bdev_nvme_destroy_bdev_channel_cb,
				sizeof(struct nvme_bdev_channel),
				bdev->disk.name);

	rc = spdk_bdev_register(&bdev->disk);
	if (rc != 0) {
		SPDK_ERRLOG("spdk_bdev_register() failed\n");
		spdk_io_device_unregister(bdev, NULL);
		free(bdev->disk.name);
		free(bdev);
		return rc;
	}

	nvme_ns->bdev = bdev;

	return 0;
}

static bool
bdev_nvme_compare_ns(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
{
	const struct spdk_nvme_ns_data *nsdata1, *nsdata2;
	const struct spdk_uuid *uuid1, *uuid2;

	nsdata1 = spdk_nvme_ns_get_data(ns1);
	nsdata2 = spdk_nvme_ns_get_data(ns2);
	uuid1 = spdk_nvme_ns_get_uuid(ns1);
	uuid2 = spdk_nvme_ns_get_uuid(ns2);

	return memcmp(nsdata1->nguid, nsdata2->nguid, sizeof(nsdata1->nguid)) == 0 &&
	       nsdata1->eui64 == nsdata2->eui64 &&
	       uuid1 != NULL && uuid2 != NULL && spdk_uuid_compare(uuid1, uuid2) == 0;
}

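/*
 * Note that all three identifiers (NGUID, EUI64, and UUID) must agree, and
 * both namespaces must actually report a UUID; a namespace without a UUID
 * never compares equal, which keeps multipath matching conservative.
 */
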
static bool
hotplug_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
		 struct spdk_nvme_ctrlr_opts *opts)
{
	struct nvme_probe_skip_entry *entry;

	TAILQ_FOREACH(entry, &g_skipped_nvme_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(trid, &entry->trid) == 0) {
			return false;
		}
	}

	opts->arbitration_burst = (uint8_t)g_opts.arbitration_burst;
	opts->low_priority_weight = (uint8_t)g_opts.low_priority_weight;
	opts->medium_priority_weight = (uint8_t)g_opts.medium_priority_weight;
	opts->high_priority_weight = (uint8_t)g_opts.high_priority_weight;
	opts->disable_read_ana_log_page = true;

	SPDK_DEBUGLOG(bdev_nvme, "Attaching to %s\n", trid->traddr);

	return true;
}

static void
nvme_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_ctrlr *nvme_ctrlr = ctx;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_WARNLOG("Abort failed. Resetting controller. sc is %u, sct is %u.\n", cpl->status.sc,
			     cpl->status.sct);
		bdev_nvme_reset(nvme_ctrlr);
	} else if (cpl->cdw0 & 0x1) {
		SPDK_WARNLOG("Specified command could not be aborted.\n");
		bdev_nvme_reset(nvme_ctrlr);
	}
}

static void
timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
	   struct spdk_nvme_qpair *qpair, uint16_t cid)
{
	struct nvme_ctrlr *nvme_ctrlr = cb_arg;
	union spdk_nvme_csts_register csts;
	int rc;

	assert(nvme_ctrlr->ctrlr == ctrlr);

	SPDK_WARNLOG("Detected a timeout. ctrlr=%p qpair=%p cid=%u\n", ctrlr, qpair, cid);

	/* Only try to read CSTS if it's a PCIe controller or we have a timeout on an I/O
	 * queue. (Note: qpair == NULL when there's an admin cmd timeout.) Otherwise we
	 * would submit another fabrics cmd on the admin queue to read CSTS and check for its
	 * completion recursively.
	 */
	if (nvme_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE || qpair != NULL) {
		csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr);
		if (csts.bits.cfs) {
			SPDK_ERRLOG("Controller Fatal Status, reset required\n");
			bdev_nvme_reset(nvme_ctrlr);
			return;
		}
	}

	switch (g_opts.action_on_timeout) {
	case SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT:
		if (qpair) {
			/* Don't send abort to ctrlr when reset is running. */
			pthread_mutex_lock(&nvme_ctrlr->mutex);
			if (nvme_ctrlr->resetting) {
				pthread_mutex_unlock(&nvme_ctrlr->mutex);
				SPDK_NOTICELOG("Quit abort. Ctrlr is in the process of resetting.\n");
				return;
			}
			pthread_mutex_unlock(&nvme_ctrlr->mutex);

			rc = spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid,
						       nvme_abort_cpl, nvme_ctrlr);
			if (rc == 0) {
				return;
			}

			SPDK_ERRLOG("Unable to send abort. Resetting, rc is %d.\n", rc);
		}

	/* FALLTHROUGH */
	case SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET:
		bdev_nvme_reset(nvme_ctrlr);
		break;
	case SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE:
		SPDK_DEBUGLOG(bdev_nvme, "No action for nvme controller timeout.\n");
		break;
	default:
		SPDK_ERRLOG("An invalid timeout action value is found.\n");
		break;
	}
}

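/*
 * Summary of the timeout policy above: a controller reporting fatal status
 * (CSTS.CFS) is reset unconditionally; otherwise g_opts.action_on_timeout
 * selects ABORT (try spdk_nvme_ctrlr_cmd_abort() and fall through to a reset
 * on failure), RESET, or NONE. See bdev_nvme_set_opts() later in this file
 * for how these options are configured.
 */
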
static void
nvme_ctrlr_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns,
			      struct nvme_async_probe_ctx *ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
	struct spdk_nvme_ns *ns;
	int rc = 0;

	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nvme_ns->id);
	if (!ns) {
		SPDK_DEBUGLOG(bdev_nvme, "Invalid NS %u\n", nvme_ns->id);
		rc = -EINVAL;
		goto done;
	}

	nvme_ns->ns = ns;
	nvme_ns->populated = true;
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	if (nvme_ctrlr->ana_log_page != NULL) {
		bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ns_set_ana_state, nvme_ns);
	}

	rc = nvme_bdev_create(nvme_ctrlr, nvme_ns);
done:
	nvme_ctrlr_populate_namespace_done(ctx, nvme_ns, rc);
}

static void
nvme_ctrlr_depopulate_namespace(struct nvme_ctrlr *ctrlr, struct nvme_ns *nvme_ns)
{
	struct nvme_bdev *bdev;

	bdev = nvme_ns->bdev;
	if (bdev != NULL) {
		spdk_bdev_unregister(&bdev->disk, NULL, NULL);
	}

	nvme_ctrlr_depopulate_namespace_done(nvme_ns);
}

void
nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
				   struct nvme_ns *nvme_ns, int rc)
{
	struct nvme_ctrlr *nvme_ctrlr = nvme_ns->ctrlr;

	assert(nvme_ctrlr != NULL);

	if (rc == 0) {
		pthread_mutex_lock(&nvme_ctrlr->mutex);
		nvme_ctrlr->ref++;
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
	} else {
		memset(nvme_ns, 0, sizeof(*nvme_ns));
	}

	if (ctx) {
		ctx->populates_in_progress--;
		if (ctx->populates_in_progress == 0) {
			nvme_ctrlr_populate_namespaces_done(nvme_ctrlr, ctx);
		}
	}
}

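/*
 * Each successfully populated namespace takes a reference on its nvme_ctrlr
 * (the ref++ above); the matching release is expected to happen when the
 * namespace is depopulated, via nvme_ctrlr_depopulate_namespace_done(), so a
 * controller cannot go away while bdevs built on its namespaces still exist.
 */
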
static void
nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr,
			       struct nvme_async_probe_ctx *ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
	struct nvme_ns *nvme_ns;
	struct spdk_nvme_ns *ns;
	struct nvme_bdev *bdev;
	uint32_t i;
	int rc;
	uint64_t num_sectors;
	bool ns_is_active;

	if (ctx) {
		/* Initialize this count to 1 to handle the populate functions
		 * calling nvme_ctrlr_populate_namespace_done() immediately.
		 */
		ctx->populates_in_progress = 1;
	}

	for (i = 0; i < nvme_ctrlr->num_ns; i++) {
		uint32_t nsid = i + 1;

		nvme_ns = nvme_ctrlr->namespaces[i];
		ns_is_active = spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid);

		if (nvme_ns->populated && ns_is_active && nvme_ns->type == NVME_NS_STANDARD) {
			/* NS is still there but attributes may have changed */
			ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
			num_sectors = spdk_nvme_ns_get_num_sectors(ns);
			bdev = nvme_ns->bdev;
			assert(bdev != NULL);
			if (bdev->disk.blockcnt != num_sectors) {
				SPDK_NOTICELOG("NSID %u is resized: bdev name %s, old size %" PRIu64 ", new size %" PRIu64 "\n",
					       nsid,
					       bdev->disk.name,
					       bdev->disk.blockcnt,
					       num_sectors);
				rc = spdk_bdev_notify_blockcnt_change(&bdev->disk, num_sectors);
				if (rc != 0) {
					SPDK_ERRLOG("Could not change num blocks for nvme bdev: name %s, errno: %d.\n",
						    bdev->disk.name, rc);
				}
			}
		}

		if (!nvme_ns->populated && ns_is_active) {
			nvme_ns->id = nsid;
			nvme_ns->ctrlr = nvme_ctrlr;
			nvme_ns->type = NVME_NS_STANDARD;

			nvme_ns->bdev = NULL;

			if (ctx) {
				ctx->populates_in_progress++;
			}
			nvme_ctrlr_populate_namespace(nvme_ctrlr, nvme_ns, ctx);
		}

		if (nvme_ns->populated && !ns_is_active) {
			nvme_ctrlr_depopulate_namespace(nvme_ctrlr, nvme_ns);
		}
	}

	if (ctx) {
		/* Decrement this count now that the loop is over to account
		 * for the one we started with. If the count is then 0, we
		 * know any populate_namespace functions completed immediately,
		 * so we'll kick the callback here.
		 */
		ctx->populates_in_progress--;
		if (ctx->populates_in_progress == 0) {
			nvme_ctrlr_populate_namespaces_done(nvme_ctrlr, ctx);
		}
	}
}

static void
nvme_ctrlr_depopulate_namespaces(struct nvme_ctrlr *nvme_ctrlr)
{
	uint32_t i;
	struct nvme_ns *nvme_ns;

	for (i = 0; i < nvme_ctrlr->num_ns; i++) {
		uint32_t nsid = i + 1;

		nvme_ns = nvme_ctrlr->namespaces[nsid - 1];
		if (nvme_ns->populated) {
			assert(nvme_ns->id == nsid);
			nvme_ctrlr_depopulate_namespace(nvme_ctrlr, nvme_ns);
		}
	}
}

static bool
nvme_ctrlr_acquire(struct nvme_ctrlr *nvme_ctrlr)
{
	pthread_mutex_lock(&nvme_ctrlr->mutex);
	if (nvme_ctrlr->destruct || nvme_ctrlr->resetting) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		return false;
	}
	nvme_ctrlr->ref++;
	pthread_mutex_unlock(&nvme_ctrlr->mutex);
	return true;
}

static int
nvme_ctrlr_set_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
			  void *cb_arg)
{
	struct nvme_ctrlr *nvme_ctrlr = cb_arg;
	struct nvme_ns *nvme_ns;
	uint32_t i, nsid;

	for (i = 0; i < desc->num_of_nsid; i++) {
		nsid = desc->nsid[i];
		if (nsid == 0 || nsid > nvme_ctrlr->num_ns) {
			continue;
		}

		nvme_ns = nvme_ctrlr->namespaces[nsid - 1];
		assert(nvme_ns != NULL);

		if (!nvme_ns->populated) {
			continue;
		}

		nvme_ns->ana_group_id = desc->ana_group_id;
		nvme_ns->ana_state = desc->ana_state;
	}

	return 0;
}

static void
nvme_ctrlr_read_ana_log_page_done(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_ctrlr *nvme_ctrlr = ctx;

	if (spdk_nvme_cpl_is_success(cpl)) {
		bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states,
					     nvme_ctrlr);
	}

	nvme_ctrlr_release(nvme_ctrlr);
}

static void
nvme_ctrlr_read_ana_log_page(struct nvme_ctrlr *nvme_ctrlr)
{
	int rc;

	if (nvme_ctrlr->ana_log_page == NULL) {
		return;
	}

	if (!nvme_ctrlr_acquire(nvme_ctrlr)) {
		return;
	}

	rc = spdk_nvme_ctrlr_cmd_get_log_page(nvme_ctrlr->ctrlr,
					      SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
					      SPDK_NVME_GLOBAL_NS_TAG,
					      nvme_ctrlr->ana_log_page,
					      nvme_ctrlr->ana_log_page_size, 0,
					      nvme_ctrlr_read_ana_log_page_done,
					      nvme_ctrlr);
	if (rc != 0) {
		nvme_ctrlr_release(nvme_ctrlr);
	}
}

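/*
 * nvme_ctrlr_acquire()/nvme_ctrlr_release() bracket in-flight admin work so
 * the controller is not destructed or reset underneath it. The function above
 * is the template; a hedged sketch of the general pattern (the helper name is
 * hypothetical):
 *
 *	if (!nvme_ctrlr_acquire(nvme_ctrlr)) {
 *		return;				// resetting or being destroyed
 *	}
 *	rc = submit_async_admin_cmd(...);	// completion calls release
 *	if (rc != 0) {
 *		nvme_ctrlr_release(nvme_ctrlr);	// completion will never run
 *	}
 */
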
static void
aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_ctrlr *nvme_ctrlr = arg;
	union spdk_nvme_async_event_completion event;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_WARNLOG("AER request execute failed\n");
		return;
	}

	event.raw = cpl->cdw0;
	if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
	    (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
		nvme_ctrlr_populate_namespaces(nvme_ctrlr, NULL);
	} else if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
		   (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
		nvme_ctrlr_read_ana_log_page(nvme_ctrlr);
	}
}

static void
populate_namespaces_cb(struct nvme_async_probe_ctx *ctx, size_t count, int rc)
{
	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_ctx, count, rc);
	}

	ctx->namespaces_populated = true;
	if (ctx->probe_done) {
		/* The probe was already completed, so we need to free the context
		 * here. This can happen for cases like OCSSD, where we need to
		 * send additional commands to the SSD after attach.
		 */
		free(ctx);
	}
}

static void
nvme_ctrlr_create_done(struct nvme_ctrlr *nvme_ctrlr,
		       struct nvme_async_probe_ctx *ctx)
{
	spdk_io_device_register(nvme_ctrlr,
				bdev_nvme_create_ctrlr_channel_cb,
				bdev_nvme_destroy_ctrlr_channel_cb,
				sizeof(struct nvme_ctrlr_channel),
				nvme_ctrlr->name);

	nvme_ctrlr_populate_namespaces(nvme_ctrlr, ctx);
}

static void
nvme_ctrlr_init_ana_log_page_done(void *_ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_ctrlr *nvme_ctrlr = _ctx;
	struct nvme_async_probe_ctx *ctx = nvme_ctrlr->probe_ctx;

	nvme_ctrlr->probe_ctx = NULL;

	if (spdk_nvme_cpl_is_error(cpl)) {
		nvme_ctrlr_delete(nvme_ctrlr);

		if (ctx != NULL) {
			populate_namespaces_cb(ctx, 0, -1);
		}
		return;
	}

	nvme_ctrlr_create_done(nvme_ctrlr, ctx);
}

static int
nvme_ctrlr_init_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
			     struct nvme_async_probe_ctx *ctx)
{
	struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
	const struct spdk_nvme_ctrlr_data *cdata;
	uint32_t ana_log_page_size;

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + cdata->nanagrpid *
			    sizeof(struct spdk_nvme_ana_group_descriptor) + cdata->nn *
			    sizeof(uint32_t);

	nvme_ctrlr->ana_log_page = spdk_zmalloc(ana_log_page_size, 64, NULL,
						SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (nvme_ctrlr->ana_log_page == NULL) {
		SPDK_ERRLOG("could not allocate ANA log page buffer\n");
		return -ENXIO;
	}

	/* Each descriptor in an ANA log page is not ensured to be 8-byte aligned.
	 * Hence copy each descriptor to a temporary area when parsing it.
	 *
	 * Allocate a buffer whose size is as large as the ANA log page buffer because
	 * we do not know the size of a descriptor until actually reading it.
	 */
	nvme_ctrlr->copied_ana_desc = calloc(1, ana_log_page_size);
	if (nvme_ctrlr->copied_ana_desc == NULL) {
		SPDK_ERRLOG("could not allocate a buffer to parse ANA descriptor\n");
		return -ENOMEM;
	}

	nvme_ctrlr->ana_log_page_size = ana_log_page_size;

	nvme_ctrlr->probe_ctx = ctx;

	return spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
						SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
						SPDK_NVME_GLOBAL_NS_TAG,
						nvme_ctrlr->ana_log_page,
						nvme_ctrlr->ana_log_page_size, 0,
						nvme_ctrlr_init_ana_log_page_done,
						nvme_ctrlr);
}

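/*
 * Worked example for the ana_log_page_size computation above, assuming the
 * spec's 16-byte log header and 32-byte group descriptors: a controller with
 * nanagrpid = 4 ANA groups and nn = 32 namespaces needs
 * 16 + 4 * 32 + 32 * 4 = 272 bytes. This is a worst case: every descriptor
 * plus one 4-byte NSID entry per namespace.
 */
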
static int
nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
		  const char *name,
		  const struct spdk_nvme_transport_id *trid,
		  uint32_t prchk_flags,
		  struct nvme_async_probe_ctx *ctx)
{
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ctrlr_trid *trid_entry;
	uint32_t i, num_ns;
	const struct spdk_nvme_ctrlr_data *cdata;
	int rc;

	nvme_ctrlr = calloc(1, sizeof(*nvme_ctrlr));
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return -ENOMEM;
	}

	rc = pthread_mutex_init(&nvme_ctrlr->mutex, NULL);
	if (rc != 0) {
		free(nvme_ctrlr);
		return rc;
	}

	TAILQ_INIT(&nvme_ctrlr->trids);

	num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
	if (num_ns != 0) {
		nvme_ctrlr->namespaces = calloc(num_ns, sizeof(struct nvme_ns *));
		if (!nvme_ctrlr->namespaces) {
			SPDK_ERRLOG("Failed to allocate block namespaces pointer\n");
			rc = -ENOMEM;
			goto err;
		}

		for (i = 0; i < num_ns; i++) {
			nvme_ctrlr->namespaces[i] = calloc(1, sizeof(struct nvme_ns));
			if (nvme_ctrlr->namespaces[i] == NULL) {
				SPDK_ERRLOG("Failed to allocate block namespace struct\n");
				rc = -ENOMEM;
				goto err;
			}
			nvme_ctrlr->num_ns++;
		}

		assert(num_ns == nvme_ctrlr->num_ns);
	}

	trid_entry = calloc(1, sizeof(*trid_entry));
	if (trid_entry == NULL) {
		SPDK_ERRLOG("Failed to allocate trid entry pointer\n");
		rc = -ENOMEM;
		goto err;
	}

	trid_entry->trid = *trid;
	nvme_ctrlr->connected_trid = &trid_entry->trid;
	TAILQ_INSERT_HEAD(&nvme_ctrlr->trids, trid_entry, link);

	nvme_ctrlr->thread = spdk_get_thread();
	nvme_ctrlr->ctrlr = ctrlr;
	nvme_ctrlr->ref = 1;
	nvme_ctrlr->name = strdup(name);
	if (nvme_ctrlr->name == NULL) {
		rc = -ENOMEM;
		goto err;
	}

	if (spdk_nvme_ctrlr_is_ocssd_supported(ctrlr)) {
		SPDK_ERRLOG("OCSSDs are not supported\n");
		rc = -ENOTSUP;
		goto err;
	}

	nvme_ctrlr->prchk_flags = prchk_flags;

	nvme_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_ctrlr,
					  g_opts.nvme_adminq_poll_period_us);

	pthread_mutex_lock(&g_bdev_nvme_mutex);
	TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
	pthread_mutex_unlock(&g_bdev_nvme_mutex);

	if (g_opts.timeout_us > 0) {
		/* Register timeout callback. Timeout values for IO vs. admin reqs can be different. */
		/* If timeout_admin_us is 0 (not specified), admin uses same timeout as IO. */
		uint64_t adm_timeout_us = (g_opts.timeout_admin_us == 0) ?
					  g_opts.timeout_us : g_opts.timeout_admin_us;
		spdk_nvme_ctrlr_register_timeout_callback(ctrlr, g_opts.timeout_us,
				adm_timeout_us, timeout_cb, nvme_ctrlr);
	}

	spdk_nvme_ctrlr_register_aer_callback(ctrlr, aer_cb, nvme_ctrlr);
	spdk_nvme_ctrlr_set_remove_cb(ctrlr, remove_cb, nvme_ctrlr);

	if (spdk_nvme_ctrlr_get_flags(ctrlr) &
	    SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED) {
		nvme_ctrlr->opal_dev = spdk_opal_dev_construct(ctrlr);
	}

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (cdata->cmic.ana_reporting) {
		rc = nvme_ctrlr_init_ana_log_page(nvme_ctrlr, ctx);
		if (rc == 0) {
			return 0;
		}
	} else {
		nvme_ctrlr_create_done(nvme_ctrlr, ctx);
		return 0;
	}

err:
	nvme_ctrlr_delete(nvme_ctrlr);
	return rc;
}

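/*
 * Creation completes asynchronously when ANA reporting is active: the
 * controller is only handed to nvme_ctrlr_create_done(), which registers the
 * io_device and populates namespaces, after the initial ANA log page read
 * finishes in nvme_ctrlr_init_ana_log_page_done(). Without ANA reporting,
 * the same steps run synchronously at the end of nvme_ctrlr_create().
 */
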
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	struct nvme_probe_ctx *ctx = cb_ctx;
	char *name = NULL;
	uint32_t prchk_flags = 0;
	size_t i;

	if (ctx) {
		for (i = 0; i < ctx->count; i++) {
			if (spdk_nvme_transport_id_compare(trid, &ctx->trids[i]) == 0) {
				prchk_flags = ctx->prchk_flags[i];
				name = strdup(ctx->names[i]);
				break;
			}
		}
	} else {
		name = spdk_sprintf_alloc("HotInNvme%d", g_hot_insert_nvme_controller_index++);
	}
	if (!name) {
		SPDK_ERRLOG("Failed to assign name to NVMe device\n");
		return;
	}

	SPDK_DEBUGLOG(bdev_nvme, "Attached to %s (%s)\n", trid->traddr, name);

	nvme_ctrlr_create(ctrlr, name, trid, prchk_flags, NULL);

	free(name);
}

static void
_nvme_ctrlr_destruct(void *ctx)
{
	struct nvme_ctrlr *nvme_ctrlr = ctx;

	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);
	nvme_ctrlr_release(nvme_ctrlr);
}

static int
_bdev_nvme_delete(struct nvme_ctrlr *nvme_ctrlr, bool hotplug)
{
	struct nvme_probe_skip_entry *entry;

	pthread_mutex_lock(&nvme_ctrlr->mutex);

	/* The controller's destruction was already started */
	if (nvme_ctrlr->destruct) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		return 0;
	}

	if (!hotplug &&
	    nvme_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		entry = calloc(1, sizeof(*entry));
		if (!entry) {
			pthread_mutex_unlock(&nvme_ctrlr->mutex);
			return -ENOMEM;
		}
		entry->trid = *nvme_ctrlr->connected_trid;
		TAILQ_INSERT_TAIL(&g_skipped_nvme_ctrlrs, entry, tailq);
	}

	nvme_ctrlr->destruct = true;
	pthread_mutex_unlock(&nvme_ctrlr->mutex);

	_nvme_ctrlr_destruct(nvme_ctrlr);

	return 0;
}

static void
remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_ctrlr *nvme_ctrlr = cb_ctx;

	_bdev_nvme_delete(nvme_ctrlr, true);
}

static int
bdev_nvme_hotplug_probe(void *arg)
{
	if (g_hotplug_probe_ctx == NULL) {
		spdk_poller_unregister(&g_hotplug_probe_poller);
		return SPDK_POLLER_IDLE;
	}

	if (spdk_nvme_probe_poll_async(g_hotplug_probe_ctx) != -EAGAIN) {
		g_hotplug_probe_ctx = NULL;
		spdk_poller_unregister(&g_hotplug_probe_poller);
	}

	return SPDK_POLLER_BUSY;
}

static int
bdev_nvme_hotplug(void *arg)
{
	struct spdk_nvme_transport_id trid_pcie;

	if (g_hotplug_probe_ctx) {
		return SPDK_POLLER_BUSY;
	}

	memset(&trid_pcie, 0, sizeof(trid_pcie));
	spdk_nvme_trid_populate_transport(&trid_pcie, SPDK_NVME_TRANSPORT_PCIE);

	g_hotplug_probe_ctx = spdk_nvme_probe_async(&trid_pcie, NULL,
			      hotplug_probe_cb, attach_cb, NULL);

	if (g_hotplug_probe_ctx) {
		assert(g_hotplug_probe_poller == NULL);
		g_hotplug_probe_poller = SPDK_POLLER_REGISTER(bdev_nvme_hotplug_probe, NULL, 1000);
	}

	return SPDK_POLLER_BUSY;
}

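/*
 * Hotplug is a two-level polling scheme: bdev_nvme_hotplug() fires at the
 * user-configured period and starts an asynchronous PCIe probe, while
 * bdev_nvme_hotplug_probe() runs every 1000 microseconds to drain that probe
 * context until spdk_nvme_probe_poll_async() stops returning -EAGAIN.
 */
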
void
bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts)
{
	*opts = g_opts;
}

static int
bdev_nvme_validate_opts(const struct spdk_bdev_nvme_opts *opts)
{
	if ((opts->timeout_us == 0) && (opts->timeout_admin_us != 0)) {
		/* Can't set timeout_admin_us without also setting timeout_us */
		SPDK_WARNLOG("Invalid options: Can't have (timeout_us == 0) with (timeout_admin_us > 0)\n");
		return -EINVAL;
	}

	return 0;
}

int
bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts)
{
	int ret = bdev_nvme_validate_opts(opts);
	if (ret) {
		SPDK_WARNLOG("Failed to set nvme opts.\n");
		return ret;
	}

	if (g_bdev_nvme_init_thread != NULL) {
		if (!TAILQ_EMPTY(&g_nvme_ctrlrs)) {
			return -EPERM;
		}
	}

	g_opts = *opts;

	return 0;
}

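/*
 * Hedged usage sketch for the getter/setter pair above; it must run before
 * any controller is attached, since bdev_nvme_set_opts() returns -EPERM once
 * controllers exist:
 *
 *	struct spdk_bdev_nvme_opts opts;
 *
 *	bdev_nvme_get_opts(&opts);
 *	opts.timeout_us = 30 * 1000 * 1000;	// 30 second I/O timeout
 *	opts.action_on_timeout = SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET;
 *	if (bdev_nvme_set_opts(&opts) != 0) {
 *		// rejected: invalid combination, or controllers already exist
 *	}
 */
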
struct set_nvme_hotplug_ctx {
	uint64_t period_us;
	bool enabled;
	spdk_msg_fn fn;
	void *fn_ctx;
};

static void
set_nvme_hotplug_period_cb(void *_ctx)
{
	struct set_nvme_hotplug_ctx *ctx = _ctx;

	spdk_poller_unregister(&g_hotplug_poller);
	if (ctx->enabled) {
		g_hotplug_poller = SPDK_POLLER_REGISTER(bdev_nvme_hotplug, NULL, ctx->period_us);
	}

	g_nvme_hotplug_poll_period_us = ctx->period_us;
	g_nvme_hotplug_enabled = ctx->enabled;
	if (ctx->fn) {
		ctx->fn(ctx->fn_ctx);
	}

	free(ctx);
}

int
bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx)
{
	struct set_nvme_hotplug_ctx *ctx;

	if (enabled == true && !spdk_process_is_primary()) {
		return -EPERM;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	period_us = period_us == 0 ? NVME_HOTPLUG_POLL_PERIOD_DEFAULT : period_us;
	ctx->period_us = spdk_min(period_us, NVME_HOTPLUG_POLL_PERIOD_MAX);
	ctx->enabled = enabled;
	ctx->fn = cb;
	ctx->fn_ctx = cb_ctx;

	spdk_thread_send_msg(g_bdev_nvme_init_thread, set_nvme_hotplug_period_cb, ctx);
	return 0;
}

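/*
 * Hedged usage sketch: enable PCIe hotplug scanning every 100 ms from the
 * primary process (the completion callback and its context are optional):
 *
 *	bdev_nvme_set_hotplug(true, 100 * 1000, NULL, NULL);
 */
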
2019-09-27 09:27:30 +00:00
|
|
|
static void
|
2021-07-06 19:42:41 +00:00
|
|
|
nvme_ctrlr_populate_namespaces_done(struct nvme_ctrlr *nvme_ctrlr,
|
2021-03-01 22:39:39 +00:00
|
|
|
struct nvme_async_probe_ctx *ctx)
|
2019-03-05 06:13:03 +00:00
|
|
|
{
|
2021-06-30 01:08:29 +00:00
|
|
|
struct nvme_ns *nvme_ns;
|
2021-01-06 16:21:06 +00:00
|
|
|
struct nvme_bdev *nvme_bdev;
|
2019-03-05 06:13:03 +00:00
|
|
|
uint32_t i, nsid;
|
|
|
|
size_t j;
|
|
|
|
|
2021-07-06 19:42:41 +00:00
|
|
|
assert(nvme_ctrlr != NULL);
|
2019-09-17 08:06:33 +00:00
|
|
|
|
2019-03-05 06:13:03 +00:00
|
|
|
/*
|
|
|
|
* Report the new bdevs that were created in this call.
|
2019-10-18 07:12:00 +00:00
|
|
|
* There can be more than one bdev per NVMe controller.
|
2019-03-05 06:13:03 +00:00
|
|
|
*/
|
|
|
|
j = 0;
|
2021-07-06 19:42:41 +00:00
|
|
|
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
|
2019-03-05 06:13:03 +00:00
|
|
|
nsid = i + 1;
|
2021-07-06 19:42:41 +00:00
|
|
|
nvme_ns = nvme_ctrlr->namespaces[nsid - 1];
|
2020-11-25 14:52:16 +00:00
|
|
|
if (!nvme_ns->populated) {
|
2019-03-05 06:13:03 +00:00
|
|
|
continue;
|
|
|
|
}
|
2020-11-25 14:52:16 +00:00
|
|
|
assert(nvme_ns->id == nsid);
|
2021-03-26 14:29:54 +00:00
|
|
|
nvme_bdev = nvme_ns->bdev;
|
2021-01-06 16:21:06 +00:00
|
|
|
if (nvme_bdev == NULL) {
|
2021-06-30 01:08:29 +00:00
|
|
|
assert(nvme_ns->type == NVME_NS_OCSSD);
|
2021-01-06 16:21:06 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (j < ctx->count) {
|
|
|
|
ctx->names[j] = nvme_bdev->disk.name;
|
|
|
|
j++;
|
|
|
|
} else {
|
|
|
|
SPDK_ERRLOG("Maximum number of namespaces supported per NVMe controller is %du. Unable to return all names of created bdevs\n",
|
|
|
|
ctx->count);
|
|
|
|
populate_namespaces_cb(ctx, 0, -ERANGE);
|
|
|
|
return;
|
2019-03-05 06:13:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-26 17:52:55 +00:00
|
|
|
populate_namespaces_cb(ctx, j, 0);
|
2019-03-05 06:13:03 +00:00
|
|
|
}

static int
bdev_nvme_compare_trids(struct nvme_ctrlr *nvme_ctrlr,
			struct spdk_nvme_ctrlr *new_ctrlr,
			struct spdk_nvme_transport_id *trid)
{
	struct nvme_ctrlr_trid *tmp_trid;

	if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		SPDK_ERRLOG("PCIe failover is not supported.\n");
		return -ENOTSUP;
	}

	/* Currently we only support failover to the same transport type. */
	if (nvme_ctrlr->connected_trid->trtype != trid->trtype) {
		return -EINVAL;
	}

	/* Currently we only support failover to the same NQN. */
	if (strncmp(trid->subnqn, nvme_ctrlr->connected_trid->subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
		return -EINVAL;
	}

	/* Skip all the other checks if we've already registered this path. */
	TAILQ_FOREACH(tmp_trid, &nvme_ctrlr->trids, link) {
		if (!spdk_nvme_transport_id_compare(&tmp_trid->trid, trid)) {
			return -EEXIST;
		}
	}

	return 0;
}
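
/*
 * Illustrative sketch: what "already registered" means above. Two transport
 * IDs identify the same path iff spdk_nvme_transport_id_compare() returns 0.
 * The addresses below are hypothetical; the two trids differ only in traddr,
 * so they are treated as distinct paths to the same subsystem NQN.
 */
static inline bool
example_is_same_path(void)
{
	struct spdk_nvme_transport_id a = {}, b = {};

	spdk_nvme_transport_id_parse(&a,
		"trtype:TCP adrfam:IPv4 traddr:10.0.0.1 trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1");
	spdk_nvme_transport_id_parse(&b,
		"trtype:TCP adrfam:IPv4 traddr:10.0.0.2 trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1");

	return spdk_nvme_transport_id_compare(&a, &b) == 0;	/* false: different traddr */
}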

static int
bdev_nvme_compare_namespaces(struct nvme_ctrlr *nvme_ctrlr,
			     struct spdk_nvme_ctrlr *new_ctrlr)
{
	uint32_t i, nsid;
	struct nvme_ns *nvme_ns;
	struct spdk_nvme_ns *new_ns;

	if (spdk_nvme_ctrlr_get_num_ns(new_ctrlr) != nvme_ctrlr->num_ns) {
		return -EINVAL;
	}

	for (i = 0; i < nvme_ctrlr->num_ns; i++) {
		nsid = i + 1;

		nvme_ns = nvme_ctrlr->namespaces[i];
		if (!nvme_ns->populated) {
			continue;
		}

		new_ns = spdk_nvme_ctrlr_get_ns(new_ctrlr, nsid);
		assert(new_ns != NULL);

		if (!bdev_nvme_compare_ns(nvme_ns->ns, new_ns)) {
			return -EINVAL;
		}
	}

	return 0;
}

static int
_bdev_nvme_add_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
			      struct spdk_nvme_transport_id *trid)
{
	struct nvme_ctrlr_trid *new_trid, *tmp_trid;

	new_trid = calloc(1, sizeof(*new_trid));
	if (new_trid == NULL) {
		return -ENOMEM;
	}
	new_trid->trid = *trid;
	new_trid->is_failed = false;

	TAILQ_FOREACH(tmp_trid, &nvme_ctrlr->trids, link) {
		if (tmp_trid->is_failed) {
			TAILQ_INSERT_BEFORE(tmp_trid, new_trid, link);
			return 0;
		}
	}

	TAILQ_INSERT_TAIL(&nvme_ctrlr->trids, new_trid, link);
	return 0;
}
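
/*
 * Note on the insertion order above: a new trid is placed ahead of the first
 * trid marked is_failed, so every healthy path stays in front of every failed
 * one. A failover scan that walks the list front to back therefore tries
 * not-yet-failed paths, in registration order, before it retries failed ones.
 */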

/* This is the case that a secondary path is added to an existing
 * nvme_ctrlr for failover. After checking if it can access the same
 * namespaces as the primary path, it is disconnected until failover occurs.
 */
static int
bdev_nvme_add_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
			     struct spdk_nvme_ctrlr *new_ctrlr,
			     struct spdk_nvme_transport_id *trid)
{
	int rc;

	assert(nvme_ctrlr != NULL);

	pthread_mutex_lock(&nvme_ctrlr->mutex);

	rc = bdev_nvme_compare_trids(nvme_ctrlr, new_ctrlr, trid);
	if (rc != 0) {
		goto exit;
	}

	rc = bdev_nvme_compare_namespaces(nvme_ctrlr, new_ctrlr);
	if (rc != 0) {
		goto exit;
	}

	rc = _bdev_nvme_add_secondary_trid(nvme_ctrlr, trid);

exit:
	pthread_mutex_unlock(&nvme_ctrlr->mutex);

	spdk_nvme_detach(new_ctrlr);

	return rc;
}

static void
connect_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
		  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	struct spdk_nvme_ctrlr_opts *user_opts = cb_ctx;
	struct nvme_async_probe_ctx *ctx;
	int rc;

	ctx = SPDK_CONTAINEROF(user_opts, struct nvme_async_probe_ctx, opts);
	ctx->ctrlr_attached = true;

	rc = nvme_ctrlr_create(ctrlr, ctx->base_name, &ctx->trid, ctx->prchk_flags, ctx);
	if (rc != 0) {
		populate_namespaces_cb(ctx, 0, rc);
	}
}

static void
connect_set_failover_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
			struct spdk_nvme_ctrlr *ctrlr,
			const struct spdk_nvme_ctrlr_opts *opts)
{
	struct spdk_nvme_ctrlr_opts *user_opts = cb_ctx;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_async_probe_ctx *ctx;
	int rc;

	ctx = SPDK_CONTAINEROF(user_opts, struct nvme_async_probe_ctx, opts);
	ctx->ctrlr_attached = true;

	nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->base_name);
	if (nvme_ctrlr) {
		rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, ctrlr, &ctx->trid);
	} else {
		rc = -ENODEV;
	}

	populate_namespaces_cb(ctx, 0, rc);
}

static int
bdev_nvme_async_poll(void *arg)
{
	struct nvme_async_probe_ctx *ctx = arg;
	int rc;

	rc = spdk_nvme_probe_poll_async(ctx->probe_ctx);
	if (spdk_unlikely(rc != -EAGAIN)) {
		ctx->probe_done = true;
		spdk_poller_unregister(&ctx->poller);
		if (!ctx->ctrlr_attached) {
			/* The probe is done, but no controller was attached.
			 * That means we had a failure, so report -EIO back to
			 * the caller (usually the RPC). populate_namespaces_cb()
			 * will take care of freeing the nvme_async_probe_ctx.
			 */
			populate_namespaces_cb(ctx, 0, -EIO);
		} else if (ctx->namespaces_populated) {
			/* The namespaces for the attached controller were all
			 * populated and the response was already sent to the
			 * caller (usually the RPC). So free the context here.
			 */
			free(ctx);
		}
	}

	return SPDK_POLLER_BUSY;
}

int
bdev_nvme_create(struct spdk_nvme_transport_id *trid,
		 struct spdk_nvme_host_id *hostid,
		 const char *base_name,
		 const char **names,
		 uint32_t count,
		 uint32_t prchk_flags,
		 spdk_bdev_create_nvme_fn cb_fn,
		 void *cb_ctx,
		 struct spdk_nvme_ctrlr_opts *opts)
{
	struct nvme_probe_skip_entry *entry, *tmp;
	struct nvme_async_probe_ctx *ctx;
	spdk_nvme_attach_cb attach_cb;

	/* TODO expand this check to include both the host and target TRIDs.
	 * Only if both are the same should we fail.
	 */
	if (nvme_ctrlr_get(trid) != NULL) {
		SPDK_ERRLOG("A controller with the provided trid (traddr: %s) already exists.\n", trid->traddr);
		return -EEXIST;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}
	ctx->base_name = base_name;
	ctx->names = names;
	ctx->count = count;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->prchk_flags = prchk_flags;
	ctx->trid = *trid;

	if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		TAILQ_FOREACH_SAFE(entry, &g_skipped_nvme_ctrlrs, tailq, tmp) {
			if (spdk_nvme_transport_id_compare(trid, &entry->trid) == 0) {
				TAILQ_REMOVE(&g_skipped_nvme_ctrlrs, entry, tailq);
				free(entry);
				break;
			}
		}
	}

	if (opts) {
		memcpy(&ctx->opts, opts, sizeof(*opts));
	} else {
		spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctx->opts, sizeof(ctx->opts));
	}

	ctx->opts.transport_retry_count = g_opts.retry_count;
	ctx->opts.keep_alive_timeout_ms = g_opts.keep_alive_timeout_ms;
	ctx->opts.disable_read_ana_log_page = true;

	if (hostid->hostaddr[0] != '\0') {
		snprintf(ctx->opts.src_addr, sizeof(ctx->opts.src_addr), "%s", hostid->hostaddr);
	}

	if (hostid->hostsvcid[0] != '\0') {
		snprintf(ctx->opts.src_svcid, sizeof(ctx->opts.src_svcid), "%s", hostid->hostsvcid);
	}

	if (nvme_ctrlr_get_by_name(base_name) == NULL) {
		attach_cb = connect_attach_cb;
	} else {
		attach_cb = connect_set_failover_cb;
	}

	ctx->probe_ctx = spdk_nvme_connect_async(trid, &ctx->opts, attach_cb);
	if (ctx->probe_ctx == NULL) {
		SPDK_ERRLOG("No controller was found with the provided trid (traddr: %s)\n", trid->traddr);
		free(ctx);
		return -ENODEV;
	}
	ctx->poller = SPDK_POLLER_REGISTER(bdev_nvme_async_poll, ctx, 1000);

	return 0;
}
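
/*
 * Illustrative sketch (hypothetical names): attaching a TCP-attached
 * controller as bdev(s) named "Nvme0n1", "Nvme0n2", ... The callback
 * signature is assumed to follow spdk_bdev_create_nvme_fn as it is invoked
 * through populate_namespaces_cb() above: (cb_ctx, number of bdevs created, rc).
 */
static void
example_create_done(void *cb_ctx, size_t bdev_count, int rc)
{
	SPDK_NOTICELOG("bdev_nvme_create finished: %zu bdevs, rc=%d\n", bdev_count, rc);
}

static inline int
example_attach_tcp_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	/* Must stay valid until example_create_done() runs, hence static. */
	static const char *names[32];
	int rc;

	rc = spdk_nvme_transport_id_parse(&trid,
			"trtype:TCP adrfam:IPv4 traddr:10.0.0.1 trsvcid:4420 "
			"subnqn:nqn.2016-06.io.spdk:cnode1");
	if (rc != 0) {
		return rc;
	}

	return bdev_nvme_create(&trid, &hostid, "Nvme0", names, SPDK_COUNTOF(names),
				0, example_create_done, NULL, NULL);
}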

static int
bdev_nvme_delete_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
				const struct spdk_nvme_transport_id *trid)
{
	struct nvme_ctrlr_trid *ctrlr_trid, *tmp_trid;

	if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) {
		return -EBUSY;
	}

	TAILQ_FOREACH_SAFE(ctrlr_trid, &nvme_ctrlr->trids, link, tmp_trid) {
		if (!spdk_nvme_transport_id_compare(&ctrlr_trid->trid, trid)) {
			TAILQ_REMOVE(&nvme_ctrlr->trids, ctrlr_trid, link);
			free(ctrlr_trid);
			return 0;
		}
	}

	return -ENXIO;
}

int
bdev_nvme_delete(const char *name, const struct spdk_nvme_transport_id *trid)
{
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ctrlr_trid *ctrlr_trid;

	if (name == NULL) {
		return -EINVAL;
	}

	nvme_ctrlr = nvme_ctrlr_get_by_name(name);
	if (nvme_ctrlr == NULL) {
		SPDK_ERRLOG("Failed to find NVMe controller\n");
		return -ENODEV;
	}

	/* case 1: remove the controller itself. */
	if (trid == NULL) {
		return _bdev_nvme_delete(nvme_ctrlr, false);
	}

	/* case 2: we are currently using the path to be removed. */
	if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) {
		ctrlr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
		assert(nvme_ctrlr->connected_trid == &ctrlr_trid->trid);
		/* case 2A: the current path is the only path. */
		if (!TAILQ_NEXT(ctrlr_trid, link)) {
			return _bdev_nvme_delete(nvme_ctrlr, false);
		}

		/* case 2B: there is an alternative path. */
		return bdev_nvme_failover(nvme_ctrlr, true);
	}

	/* case 3: We are not using the specified path. */
	return bdev_nvme_delete_secondary_trid(nvme_ctrlr, trid);
}
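
/*
 * Illustrative sketch: the three cases above from the caller's side. Passing
 * trid == NULL deletes the whole controller; passing the trid of the active
 * path deletes the controller or fails over, depending on whether another
 * path exists; passing the trid of an inactive path just unregisters it.
 */
static inline int
example_remove_whole_ctrlr(void)
{
	return bdev_nvme_delete("Nvme0", NULL);
}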

static int
bdev_nvme_library_init(void)
{
	g_bdev_nvme_init_thread = spdk_get_thread();

	spdk_io_device_register(&g_nvme_ctrlrs, bdev_nvme_create_poll_group_cb,
				bdev_nvme_destroy_poll_group_cb,
				sizeof(struct nvme_poll_group), "nvme_poll_groups");

	return 0;
}

static void
bdev_nvme_library_fini(void)
{
	struct nvme_ctrlr *nvme_ctrlr, *tmp;
	struct nvme_probe_skip_entry *entry, *entry_tmp;

	spdk_poller_unregister(&g_hotplug_poller);
	free(g_hotplug_probe_ctx);
	g_hotplug_probe_ctx = NULL;

	TAILQ_FOREACH_SAFE(entry, &g_skipped_nvme_ctrlrs, tailq, entry_tmp) {
		TAILQ_REMOVE(&g_skipped_nvme_ctrlrs, entry, tailq);
		free(entry);
	}

	pthread_mutex_lock(&g_bdev_nvme_mutex);
	TAILQ_FOREACH_SAFE(nvme_ctrlr, &g_nvme_ctrlrs, tailq, tmp) {
		pthread_mutex_lock(&nvme_ctrlr->mutex);
		if (nvme_ctrlr->destruct) {
			/* This controller's destruction was already started
			 * before the application started shutting down.
			 */
			pthread_mutex_unlock(&nvme_ctrlr->mutex);
			continue;
		}
		nvme_ctrlr->destruct = true;
		pthread_mutex_unlock(&nvme_ctrlr->mutex);

		spdk_thread_send_msg(nvme_ctrlr->thread, _nvme_ctrlr_destruct,
				     nvme_ctrlr);
	}

	g_bdev_nvme_module_finish = true;
	if (TAILQ_EMPTY(&g_nvme_ctrlrs)) {
		pthread_mutex_unlock(&g_bdev_nvme_mutex);
		spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
		spdk_bdev_module_fini_done();
		return;
	}

	pthread_mutex_unlock(&g_bdev_nvme_mutex);
}

static void
bdev_nvme_verify_pi_error(struct nvme_bdev_io *bio)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk = {};
	int rc;

	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen, bdev->md_len, bdev->md_interleave,
			       bdev->dif_is_head_of_md, bdev->dif_type, bdev->dif_check_flags,
			       bdev_io->u.bdev.offset_blocks, 0, 0, 0, 0);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF context failed\n");
		return;
	}

	if (bdev->md_interleave) {
		rc = spdk_dif_verify(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
	} else {
		struct iovec md_iov = {
			.iov_base = bdev_io->u.bdev.md_buf,
			.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_verify(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     &md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
	}

	if (rc != 0) {
		SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n",
			    err_blk.err_type, err_blk.err_offset);
	} else {
		SPDK_ERRLOG("Hardware reported PI error but SPDK could not find any.\n");
	}
}

static void
bdev_nvme_no_pi_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	if (spdk_nvme_cpl_is_success(cpl)) {
		/* Run PI verification for read data buffer. */
		bdev_nvme_verify_pi_error(bio);
	}

	/* Return original completion status */
	bdev_nvme_io_complete_nvme_status(bio, &bio->cpl);
}

static void
bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct nvme_bdev_channel *nbdev_ch;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int ret;

	if (spdk_unlikely(spdk_nvme_cpl_is_pi_error(cpl))) {
		SPDK_ERRLOG("readv completed with PI error (sct=%d, sc=%d)\n",
			    cpl->status.sct, cpl->status.sc);

		/* Save completion status to use after verifying PI error. */
		bio->cpl = *cpl;

		nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));

		if (spdk_likely(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
			/* Read without PI checking to verify PI error. */
			ret = bdev_nvme_no_pi_readv(ns,
						    qpair,
						    bio,
						    bdev_io->u.bdev.iovs,
						    bdev_io->u.bdev.iovcnt,
						    bdev_io->u.bdev.md_buf,
						    bdev_io->u.bdev.num_blocks,
						    bdev_io->u.bdev.offset_blocks);
			if (ret == 0) {
				return;
			}
		}
	}

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static void
bdev_nvme_writev_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	if (spdk_nvme_cpl_is_pi_error(cpl)) {
		SPDK_ERRLOG("writev completed with PI error (sct=%d, sc=%d)\n",
			    cpl->status.sct, cpl->status.sc);
		/* Run PI verification for write data buffer if PI error is detected. */
		bdev_nvme_verify_pi_error(bio);
	}

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static void
bdev_nvme_zone_appendv_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);

	/* spdk_bdev_io_get_append_location() requires that the ALBA is stored in offset_blocks.
	 * Additionally, offset_blocks has to be set before calling bdev_nvme_verify_pi_error().
	 */
	bdev_io->u.bdev.offset_blocks = *(uint64_t *)&cpl->cdw0;

	if (spdk_nvme_cpl_is_pi_error(cpl)) {
		SPDK_ERRLOG("zone append completed with PI error (sct=%d, sc=%d)\n",
			    cpl->status.sct, cpl->status.sc);
		/* Run PI verification for zone append data buffer if PI error is detected. */
		bdev_nvme_verify_pi_error(bio);
	}

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static void
bdev_nvme_comparev_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	if (spdk_nvme_cpl_is_pi_error(cpl)) {
		SPDK_ERRLOG("comparev completed with PI error (sct=%d, sc=%d)\n",
			    cpl->status.sct, cpl->status.sc);
		/* Run PI verification for compare data buffer if PI error is detected. */
		bdev_nvme_verify_pi_error(bio);
	}

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static void
bdev_nvme_comparev_and_writev_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	/* Compare operation completion */
	if ((cpl->cdw0 & 0xFF) == SPDK_NVME_OPC_COMPARE) {
		/* Save compare result for write callback */
		bio->cpl = *cpl;
		return;
	}

	/* Write operation completion */
	if (spdk_nvme_cpl_is_error(&bio->cpl)) {
		/* If bio->cpl is already an error, it means the compare operation failed. In that case,
		 * complete the IO with the compare operation's status.
		 */
		if (!spdk_nvme_cpl_is_error(cpl)) {
			SPDK_ERRLOG("Unexpected write success after compare failure.\n");
		}

		bdev_nvme_io_complete_nvme_status(bio, &bio->cpl);
	} else {
		bdev_nvme_io_complete_nvme_status(bio, cpl);
	}
}

static void
bdev_nvme_queued_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static int
fill_zone_from_report(struct spdk_bdev_zone_info *info, struct spdk_nvme_zns_zone_desc *desc)
{
	switch (desc->zs) {
	case SPDK_NVME_ZONE_STATE_EMPTY:
		info->state = SPDK_BDEV_ZONE_STATE_EMPTY;
		break;
	case SPDK_NVME_ZONE_STATE_IOPEN:
		info->state = SPDK_BDEV_ZONE_STATE_IMP_OPEN;
		break;
	case SPDK_NVME_ZONE_STATE_EOPEN:
		info->state = SPDK_BDEV_ZONE_STATE_EXP_OPEN;
		break;
	case SPDK_NVME_ZONE_STATE_CLOSED:
		info->state = SPDK_BDEV_ZONE_STATE_CLOSED;
		break;
	case SPDK_NVME_ZONE_STATE_RONLY:
		info->state = SPDK_BDEV_ZONE_STATE_READ_ONLY;
		break;
	case SPDK_NVME_ZONE_STATE_FULL:
		info->state = SPDK_BDEV_ZONE_STATE_FULL;
		break;
	case SPDK_NVME_ZONE_STATE_OFFLINE:
		info->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
		break;
	default:
		SPDK_ERRLOG("Invalid zone state: %#x in zone report\n", desc->zs);
		return -EIO;
	}

	info->zone_id = desc->zslba;
	info->write_pointer = desc->wp;
	info->capacity = desc->zcap;

	return 0;
}

static void
bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;
	uint32_t zones_to_copy = bdev_io->u.zone_mgmt.num_zones;
	struct spdk_bdev_zone_info *info = bdev_io->u.zone_mgmt.buf;
	uint64_t max_zones_per_buf, i;
	uint32_t zone_report_bufsize;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	int ret;

	if (spdk_nvme_cpl_is_error(cpl)) {
		goto out_complete_io_nvme_cpl;
	}

	if (!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair)) {
		ret = -ENXIO;
		goto out_complete_io_ret;
	}

	zone_report_bufsize = spdk_nvme_ns_get_max_io_xfer_size(ns);
	max_zones_per_buf = (zone_report_bufsize - sizeof(*bio->zone_report_buf)) /
			    sizeof(bio->zone_report_buf->descs[0]);

	if (bio->zone_report_buf->nr_zones > max_zones_per_buf) {
		ret = -EINVAL;
		goto out_complete_io_ret;
	}

	if (!bio->zone_report_buf->nr_zones) {
		ret = -EINVAL;
		goto out_complete_io_ret;
	}

	for (i = 0; i < bio->zone_report_buf->nr_zones && bio->handled_zones < zones_to_copy; i++) {
		ret = fill_zone_from_report(&info[bio->handled_zones],
					    &bio->zone_report_buf->descs[i]);
		if (ret) {
			goto out_complete_io_ret;
		}
		bio->handled_zones++;
	}

	if (bio->handled_zones < zones_to_copy) {
		uint64_t zone_size_lba = spdk_nvme_zns_ns_get_zone_size_sectors(ns);
		uint64_t slba = zone_id + (zone_size_lba * bio->handled_zones);

		memset(bio->zone_report_buf, 0, zone_report_bufsize);
		ret = spdk_nvme_zns_report_zones(ns, qpair,
						 bio->zone_report_buf, zone_report_bufsize,
						 slba, SPDK_NVME_ZRA_LIST_ALL, true,
						 bdev_nvme_get_zone_info_done, bio);
		if (!ret) {
			return;
		} else {
			goto out_complete_io_ret;
		}
	}

out_complete_io_nvme_cpl:
	free(bio->zone_report_buf);
	bio->zone_report_buf = NULL;
	bdev_nvme_io_complete_nvme_status(bio, cpl);
	return;

out_complete_io_ret:
	free(bio->zone_report_buf);
	bio->zone_report_buf = NULL;
	bdev_nvme_io_complete(bio, ret);
}

static void
bdev_nvme_zone_management_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	bdev_nvme_io_complete_nvme_status(bio, cpl);
}

static void
bdev_nvme_admin_passthru_completion(void *ctx)
{
	struct nvme_bdev_io *bio = ctx;

	bdev_nvme_io_complete_nvme_status(bio, &bio->cpl);
}

static void
bdev_nvme_abort_completion(void *ctx)
{
	struct nvme_bdev_io *bio = ctx;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);

	if (spdk_nvme_cpl_is_abort_success(&bio->cpl)) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_nvme_abort_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	bio->cpl = *cpl;
	spdk_thread_send_msg(bio->orig_thread, bdev_nvme_abort_completion, bio);
}

static void
bdev_nvme_admin_passthru_done(void *ref, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_bdev_io *bio = ref;

	bio->cpl = *cpl;
	spdk_thread_send_msg(bio->orig_thread, bdev_nvme_admin_passthru_completion, bio);
}

static void
bdev_nvme_queued_reset_sgl(void *ref, uint32_t sgl_offset)
{
	struct nvme_bdev_io *bio = ref;
	struct iovec *iov;

	bio->iov_offset = sgl_offset;
	for (bio->iovpos = 0; bio->iovpos < bio->iovcnt; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		if (bio->iov_offset < iov->iov_len) {
			break;
		}

		bio->iov_offset -= iov->iov_len;
	}
}

static int
bdev_nvme_queued_next_sge(void *ref, void **address, uint32_t *length)
{
	struct nvme_bdev_io *bio = ref;
	struct iovec *iov;

	assert(bio->iovpos < bio->iovcnt);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;

	if (bio->iov_offset) {
		assert(bio->iov_offset <= iov->iov_len);
		*address += bio->iov_offset;
		*length -= bio->iov_offset;
	}

	bio->iov_offset += *length;
	if (bio->iov_offset == iov->iov_len) {
		bio->iovpos++;
		bio->iov_offset = 0;
	}

	return 0;
}
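
/*
 * Illustrative sketch: how the two callbacks above are driven. For each
 * request the NVMe driver first resets the cursor to the payload offset it
 * wants, then pulls contiguous segments until the whole payload is mapped.
 * 'payload_size' must not exceed the total iovec length.
 */
static inline void
example_walk_sgl(struct nvme_bdev_io *bio, uint32_t payload_size)
{
	void *addr;
	uint32_t len, mapped = 0;

	bdev_nvme_queued_reset_sgl(bio, 0);
	while (mapped < payload_size) {
		bdev_nvme_queued_next_sge(bio, &addr, &len);
		/* The driver may use less than 'len'; this sketch takes it whole. */
		mapped += len;
	}
}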

static void
bdev_nvme_queued_reset_fused_sgl(void *ref, uint32_t sgl_offset)
{
	struct nvme_bdev_io *bio = ref;
	struct iovec *iov;

	bio->fused_iov_offset = sgl_offset;
	for (bio->fused_iovpos = 0; bio->fused_iovpos < bio->fused_iovcnt; bio->fused_iovpos++) {
		iov = &bio->fused_iovs[bio->fused_iovpos];
		if (bio->fused_iov_offset < iov->iov_len) {
			break;
		}

		bio->fused_iov_offset -= iov->iov_len;
	}
}

static int
bdev_nvme_queued_next_fused_sge(void *ref, void **address, uint32_t *length)
{
	struct nvme_bdev_io *bio = ref;
	struct iovec *iov;

	assert(bio->fused_iovpos < bio->fused_iovcnt);

	iov = &bio->fused_iovs[bio->fused_iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;

	if (bio->fused_iov_offset) {
		assert(bio->fused_iov_offset <= iov->iov_len);
		*address += bio->fused_iov_offset;
		*length -= bio->fused_iov_offset;
	}

	bio->fused_iov_offset += *length;
	if (bio->fused_iov_offset == iov->iov_len) {
		bio->fused_iovpos++;
		bio->fused_iov_offset = 0;
	}

	return 0;
}

static int
bdev_nvme_no_pi_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
		      void *md, uint64_t lba_count, uint64_t lba)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "read %" PRIu64 " blocks with offset %#" PRIx64 " without PI check\n",
		      lba_count, lba);

	bio->iovs = iov;
	bio->iovcnt = iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;

	rc = spdk_nvme_ns_cmd_readv_with_md(ns, qpair, lba, lba_count,
					    bdev_nvme_no_pi_readv_done, bio, 0,
					    bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
					    md, 0, 0);

	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("no_pi_readv failed: rc = %d\n", rc);
	}
	return rc;
}

static int
bdev_nvme_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio, struct iovec *iov, int iovcnt,
		void *md, uint64_t lba_count, uint64_t lba, uint32_t flags,
		struct spdk_bdev_ext_io_opts *ext_opts)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "read %" PRIu64 " blocks with offset %#" PRIx64 "\n",
		      lba_count, lba);

	bio->iovs = iov;
	bio->iovcnt = iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;

	if (ext_opts) {
		bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts);
		bio->ext_opts.memory_domain = ext_opts->memory_domain;
		bio->ext_opts.memory_domain_ctx = ext_opts->memory_domain_ctx;
		bio->ext_opts.io_flags = flags;
		bio->ext_opts.metadata = md;

		rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count,
						bdev_nvme_readv_done, bio,
						bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
						&bio->ext_opts);
	} else if (iovcnt == 1) {
		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, iov[0].iov_base, md, lba,
						   lba_count,
						   bdev_nvme_readv_done, bio,
						   flags,
						   0, 0);
	} else {
		rc = spdk_nvme_ns_cmd_readv_with_md(ns, qpair, lba, lba_count,
						    bdev_nvme_readv_done, bio, flags,
						    bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
						    md, 0, 0);
	}

	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("readv failed: rc = %d\n", rc);
	}
	return rc;
}

static int
bdev_nvme_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		 struct nvme_bdev_io *bio,
		 struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		 uint32_t flags, struct spdk_bdev_ext_io_opts *ext_opts)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "write %" PRIu64 " blocks with offset %#" PRIx64 "\n",
		      lba_count, lba);

	bio->iovs = iov;
	bio->iovcnt = iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;

	if (ext_opts) {
		bio->ext_opts.size = sizeof(struct spdk_nvme_ns_cmd_ext_io_opts);
		bio->ext_opts.memory_domain = ext_opts->memory_domain;
		bio->ext_opts.memory_domain_ctx = ext_opts->memory_domain_ctx;
		bio->ext_opts.io_flags = flags;
		bio->ext_opts.metadata = md;

		rc = spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count,
						 bdev_nvme_writev_done, bio,
						 bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
						 &bio->ext_opts);
	} else if (iovcnt == 1) {
		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, iov[0].iov_base, md, lba,
						    lba_count,
						    bdev_nvme_writev_done, bio,
						    flags,
						    0, 0);
	} else {
		rc = spdk_nvme_ns_cmd_writev_with_md(ns, qpair, lba, lba_count,
						     bdev_nvme_writev_done, bio, flags,
						     bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
						     md, 0, 0);
	}

	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("writev failed: rc = %d\n", rc);
	}
	return rc;
}

static int
bdev_nvme_zone_appendv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       struct nvme_bdev_io *bio,
		       struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t zslba,
		       uint32_t flags)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "zone append %" PRIu64 " blocks to zone start lba %#" PRIx64 "\n",
		      lba_count, zslba);

	bio->iovs = iov;
	bio->iovcnt = iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;

	if (iovcnt == 1) {
		rc = spdk_nvme_zns_zone_append_with_md(ns, qpair, iov[0].iov_base, md, zslba,
						       lba_count,
						       bdev_nvme_zone_appendv_done, bio,
						       flags,
						       0, 0);
	} else {
		rc = spdk_nvme_zns_zone_appendv_with_md(ns, qpair, zslba, lba_count,
							bdev_nvme_zone_appendv_done, bio, flags,
							bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
							md, 0, 0);
	}

	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("zone append failed: rc = %d\n", rc);
	}
	return rc;
}

static int
bdev_nvme_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		   struct nvme_bdev_io *bio,
		   struct iovec *iov, int iovcnt, void *md, uint64_t lba_count, uint64_t lba,
		   uint32_t flags)
{
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "compare %" PRIu64 " blocks with offset %#" PRIx64 "\n",
		      lba_count, lba);

	bio->iovs = iov;
	bio->iovcnt = iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;

	rc = spdk_nvme_ns_cmd_comparev_with_md(ns, qpair, lba, lba_count,
					       bdev_nvme_comparev_done, bio, flags,
					       bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge,
					       md, 0, 0);

	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("comparev failed: rc = %d\n", rc);
	}
	return rc;
}

static int
bdev_nvme_comparev_and_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      struct nvme_bdev_io *bio, struct iovec *cmp_iov, int cmp_iovcnt,
			      struct iovec *write_iov, int write_iovcnt,
			      void *md, uint64_t lba_count, uint64_t lba, uint32_t flags)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	int rc;

	SPDK_DEBUGLOG(bdev_nvme, "compare and write %" PRIu64 " blocks with offset %#" PRIx64 "\n",
		      lba_count, lba);

	bio->iovs = cmp_iov;
	bio->iovcnt = cmp_iovcnt;
	bio->iovpos = 0;
	bio->iov_offset = 0;
	bio->fused_iovs = write_iov;
	bio->fused_iovcnt = write_iovcnt;
	bio->fused_iovpos = 0;
	bio->fused_iov_offset = 0;

	if (bdev_io->num_retries == 0) {
		bio->first_fused_submitted = false;
	}

	if (!bio->first_fused_submitted) {
		flags |= SPDK_NVME_IO_FLAGS_FUSE_FIRST;
		memset(&bio->cpl, 0, sizeof(bio->cpl));

		rc = spdk_nvme_ns_cmd_comparev_with_md(ns, qpair, lba, lba_count,
						       bdev_nvme_comparev_and_writev_done, bio, flags,
						       bdev_nvme_queued_reset_sgl, bdev_nvme_queued_next_sge, md, 0, 0);
		if (rc == 0) {
			bio->first_fused_submitted = true;
			flags &= ~SPDK_NVME_IO_FLAGS_FUSE_FIRST;
		} else {
			if (rc != -ENOMEM) {
				SPDK_ERRLOG("compare failed: rc = %d\n", rc);
			}
			return rc;
		}
	}

	flags |= SPDK_NVME_IO_FLAGS_FUSE_SECOND;

	rc = spdk_nvme_ns_cmd_writev_with_md(ns, qpair, lba, lba_count,
					     bdev_nvme_comparev_and_writev_done, bio, flags,
					     bdev_nvme_queued_reset_fused_sgl, bdev_nvme_queued_next_fused_sge, md, 0, 0);
	if (rc != 0 && rc != -ENOMEM) {
		SPDK_ERRLOG("write failed: rc = %d\n", rc);
		rc = 0;
	}

	return rc;
}
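
/*
 * Note on the fused pair above: the compare is submitted with
 * SPDK_NVME_IO_FLAGS_FUSE_FIRST and the write with
 * SPDK_NVME_IO_FLAGS_FUSE_SECOND, and both halves share
 * bdev_nvme_comparev_and_writev_done(), which tells them apart by the opcode
 * reported in cpl->cdw0. Per the NVMe fused-operation semantics, the write is
 * executed only if the compare succeeds; otherwise it is completed with an
 * error status.
 */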

static int
bdev_nvme_unmap(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		struct nvme_bdev_io *bio,
		uint64_t offset_blocks,
		uint64_t num_blocks)
{
	struct spdk_nvme_dsm_range dsm_ranges[SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES];
	struct spdk_nvme_dsm_range *range;
	uint64_t offset, remaining;
	uint64_t num_ranges_u64;
	uint16_t num_ranges;
	int rc;

	num_ranges_u64 = (num_blocks + SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS - 1) /
			 SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS;
	if (num_ranges_u64 > SPDK_COUNTOF(dsm_ranges)) {
		SPDK_ERRLOG("Unmap request for %" PRIu64 " blocks is too large\n", num_blocks);
		return -EINVAL;
	}
	num_ranges = (uint16_t)num_ranges_u64;

	offset = offset_blocks;
	remaining = num_blocks;
	range = &dsm_ranges[0];

	/* Fill max-size ranges until the remaining blocks fit into one range */
	while (remaining > SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS) {
		range->attributes.raw = 0;
		range->length = SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS;
		range->starting_lba = offset;

		offset += SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS;
		remaining -= SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS;
		range++;
	}

	/* Final range describes the remaining blocks */
	range->attributes.raw = 0;
	range->length = remaining;
	range->starting_lba = offset;

	rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair,
						 SPDK_NVME_DSM_ATTR_DEALLOCATE,
						 dsm_ranges, num_ranges,
						 bdev_nvme_queued_done, bio);

	return rc;
}
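
/*
 * Worked example for the range splitting above: if num_blocks were 2.5x
 * SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS, the loop would emit two
 * maximum-size ranges and the final range would carry the remaining half,
 * for num_ranges == 3. A request needing more than
 * SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES ranges is rejected up front with
 * -EINVAL.
 */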

static int
bdev_nvme_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       struct nvme_bdev_io *bio,
		       uint64_t offset_blocks,
		       uint64_t num_blocks)
{
	if (num_blocks > UINT16_MAX + 1) {
		SPDK_ERRLOG("NVMe write zeroes is limited to 16-bit block count\n");
		return -EINVAL;
	}

	return spdk_nvme_ns_cmd_write_zeroes(ns, qpair,
					     offset_blocks, num_blocks,
					     bdev_nvme_queued_done, bio,
					     0);
}
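
/*
 * The UINT16_MAX + 1 bound above comes from the NVMe Write Zeroes command:
 * its NLB field is 16 bits wide and zero-based, so a single command can
 * cover at most 65536 blocks.
 */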

static int
bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			struct nvme_bdev_io *bio, uint64_t zone_id, uint32_t num_zones,
			struct spdk_bdev_zone_info *info)
{
	uint32_t zone_report_bufsize = spdk_nvme_ns_get_max_io_xfer_size(ns);
	uint64_t zone_size = spdk_nvme_zns_ns_get_zone_size_sectors(ns);
	uint64_t total_zones = spdk_nvme_zns_ns_get_num_zones(ns);

	if (zone_id % zone_size != 0) {
		return -EINVAL;
	}

	if (num_zones > total_zones || !num_zones) {
		return -EINVAL;
	}

	assert(!bio->zone_report_buf);
	bio->zone_report_buf = calloc(1, zone_report_bufsize);
	if (!bio->zone_report_buf) {
		return -ENOMEM;
	}

	bio->handled_zones = 0;

	return spdk_nvme_zns_report_zones(ns, qpair, bio->zone_report_buf, zone_report_bufsize,
					  zone_id, SPDK_NVME_ZRA_LIST_ALL, true,
					  bdev_nvme_get_zone_info_done, bio);
}

static int
bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			  struct nvme_bdev_io *bio, uint64_t zone_id,
			  enum spdk_bdev_zone_action action)
{
	switch (action) {
	case SPDK_BDEV_ZONE_CLOSE:
		return spdk_nvme_zns_close_zone(ns, qpair, zone_id, false,
						bdev_nvme_zone_management_done, bio);
	case SPDK_BDEV_ZONE_FINISH:
		return spdk_nvme_zns_finish_zone(ns, qpair, zone_id, false,
						 bdev_nvme_zone_management_done, bio);
	case SPDK_BDEV_ZONE_OPEN:
		return spdk_nvme_zns_open_zone(ns, qpair, zone_id, false,
					       bdev_nvme_zone_management_done, bio);
	case SPDK_BDEV_ZONE_RESET:
		return spdk_nvme_zns_reset_zone(ns, qpair, zone_id, false,
						bdev_nvme_zone_management_done, bio);
	case SPDK_BDEV_ZONE_OFFLINE:
		return spdk_nvme_zns_offline_zone(ns, qpair, zone_id, false,
						  bdev_nvme_zone_management_done, bio);
	default:
		return -EINVAL;
	}
}

static int
bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
			 struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
{
	struct nvme_ctrlr *nvme_ctrlr;
	uint32_t max_xfer_size;

	if (!bdev_nvme_find_admin_path(nbdev_ch, &nvme_ctrlr)) {
		return -EINVAL;
	}

	max_xfer_size = spdk_nvme_ctrlr_get_max_xfer_size(nvme_ctrlr->ctrlr);

	if (nbytes > max_xfer_size) {
		SPDK_ERRLOG("nbytes is greater than MDTS %" PRIu32 ".\n", max_xfer_size);
		return -EINVAL;
	}

	bio->orig_thread = spdk_get_thread();

	return spdk_nvme_ctrlr_cmd_admin_raw(nvme_ctrlr->ctrlr, cmd, buf,
					     (uint32_t)nbytes, bdev_nvme_admin_passthru_done, bio);
}
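
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): issuing
 * an Identify Controller command through the admin passthru path above. The
 * caller owns 'nbdev_ch' and 'bio' from an in-flight admin-passthru bdev_io,
 * and 'buf' must hold at least 4096 bytes, i.e. a struct spdk_nvme_ctrlr_data.
 */
static inline int
example_identify_ctrlr_passthru(struct nvme_bdev_channel *nbdev_ch,
				struct nvme_bdev_io *bio, void *buf)
{
	struct spdk_nvme_cmd cmd = {};

	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;	/* CNS = 0x01 */

	return bdev_nvme_admin_passthru(nbdev_ch, bio, &cmd, buf,
					sizeof(struct spdk_nvme_ctrlr_data));
}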

static int
bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      struct nvme_bdev_io *bio,
		      struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
{
	uint32_t max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
	struct spdk_nvme_ctrlr *ctrlr = spdk_nvme_ns_get_ctrlr(ns);

	if (nbytes > max_xfer_size) {
		SPDK_ERRLOG("nbytes is greater than MDTS %" PRIu32 ".\n", max_xfer_size);
		return -EINVAL;
	}

	/*
	 * Each NVMe bdev is a specific namespace, and all NVMe I/O commands require a nsid,
	 * so fill it out automatically.
	 */
	cmd->nsid = spdk_nvme_ns_get_id(ns);

	return spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair, cmd, buf,
					  (uint32_t)nbytes, bdev_nvme_queued_done, bio);
}

static int
bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			 struct nvme_bdev_io *bio,
			 struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len)
{
	size_t nr_sectors = nbytes / spdk_nvme_ns_get_extended_sector_size(ns);
	uint32_t max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
	struct spdk_nvme_ctrlr *ctrlr = spdk_nvme_ns_get_ctrlr(ns);

	if (nbytes > max_xfer_size) {
		SPDK_ERRLOG("nbytes is greater than MDTS %" PRIu32 ".\n", max_xfer_size);
		return -EINVAL;
	}

	if (md_len != nr_sectors * spdk_nvme_ns_get_md_size(ns)) {
		SPDK_ERRLOG("invalid meta data buffer size\n");
		return -EINVAL;
	}

	/*
	 * Each NVMe bdev is a specific namespace, and all NVMe I/O commands require a nsid,
	 * so fill it out automatically.
	 */
	cmd->nsid = spdk_nvme_ns_get_id(ns);

	return spdk_nvme_ctrlr_cmd_io_raw_with_md(ctrlr, qpair, cmd, buf,
			(uint32_t)nbytes, md_buf, bdev_nvme_queued_done, bio);
}

static int
bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
		struct nvme_bdev_io *bio_to_abort)
{
	struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
	int rc;

	bio->orig_thread = spdk_get_thread();

	rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr_ch->ctrlr->ctrlr,
					   ctrlr_ch->qpair,
					   bio_to_abort,
					   bdev_nvme_abort_done, bio);
	if (rc == -ENOENT) {
		/* If no command was found in I/O qpair, the target command may be
		 * admin command.
		 */
		rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr_ch->ctrlr->ctrlr,
						   NULL,
						   bio_to_abort,
						   bdev_nvme_abort_done, bio);
	}

	if (rc == -ENOENT) {
		/* If no command was found, complete the abort request with failure. */
		bio->cpl.cdw0 |= 1U;
		bio->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
		bio->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

		bdev_nvme_abort_completion(bio);

		rc = 0;
	}

	return rc;
}
|
|
|
|
|
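
/*
 * Write the current module-level options as a "bdev_nvme_set_options" RPC so
 * that replaying the saved configuration restores them. A sketch of the
 * emitted object (the values shown are illustrative, not defaults):
 *
 *   {
 *     "method": "bdev_nvme_set_options",
 *     "params": {
 *       "action_on_timeout": "none",
 *       "timeout_us": 0,
 *       ...
 *     }
 *   }
 */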
static void
bdev_nvme_opts_config_json(struct spdk_json_write_ctx *w)
{
	const char *action;

	if (g_opts.action_on_timeout == SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET) {
		action = "reset";
	} else if (g_opts.action_on_timeout == SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT) {
		action = "abort";
	} else {
		action = "none";
	}

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_nvme_set_options");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "action_on_timeout", action);
	spdk_json_write_named_uint64(w, "timeout_us", g_opts.timeout_us);
	spdk_json_write_named_uint64(w, "timeout_admin_us", g_opts.timeout_admin_us);
	spdk_json_write_named_uint32(w, "keep_alive_timeout_ms", g_opts.keep_alive_timeout_ms);
	spdk_json_write_named_uint32(w, "retry_count", g_opts.retry_count);
	spdk_json_write_named_uint32(w, "arbitration_burst", g_opts.arbitration_burst);
	spdk_json_write_named_uint32(w, "low_priority_weight", g_opts.low_priority_weight);
	spdk_json_write_named_uint32(w, "medium_priority_weight", g_opts.medium_priority_weight);
	spdk_json_write_named_uint32(w, "high_priority_weight", g_opts.high_priority_weight);
	spdk_json_write_named_uint64(w, "nvme_adminq_poll_period_us", g_opts.nvme_adminq_poll_period_us);
	spdk_json_write_named_uint64(w, "nvme_ioq_poll_period_us", g_opts.nvme_ioq_poll_period_us);
	spdk_json_write_named_uint32(w, "io_queue_requests", g_opts.io_queue_requests);
	spdk_json_write_named_bool(w, "delay_cmd_submit", g_opts.delay_cmd_submit);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
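
/*
 * Write one "bdev_nvme_attach_controller" RPC describing an attached
 * controller: its name, the transport ID it is currently connected through,
 * and whether reference-tag / guard protection-information checks are
 * enabled for it.
 */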
static void
nvme_ctrlr_config_json(struct spdk_json_write_ctx *w,
		       struct nvme_ctrlr *nvme_ctrlr)
{
	struct spdk_nvme_transport_id *trid;

	trid = nvme_ctrlr->connected_trid;

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_nvme_attach_controller");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", nvme_ctrlr->name);
	nvme_bdev_dump_trid_json(trid, w);
	spdk_json_write_named_bool(w, "prchk_reftag",
				   (nvme_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG) != 0);
	spdk_json_write_named_bool(w, "prchk_guard",
				   (nvme_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD) != 0);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
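
/*
 * Write the "bdev_nvme_set_hotplug" RPC reflecting the current hotplug
 * poller state: the poll period in microseconds and whether it is enabled.
 */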
static void
bdev_nvme_hotplug_config_json(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "bdev_nvme_set_hotplug");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint64(w, "period_us", g_nvme_hotplug_poll_period_us);
	spdk_json_write_named_bool(w, "enable", g_nvme_hotplug_enabled);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
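
/*
 * Config-dump entry point for the whole module (presumably wired into the
 * bdev module's config_json callback). It emits the option set first, then
 * one attach RPC per controller, and the hotplug RPC last;
 * g_bdev_nvme_mutex is held while walking the controller list.
 */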
static int
bdev_nvme_config_json(struct spdk_json_write_ctx *w)
{
	struct nvme_ctrlr *nvme_ctrlr;

	bdev_nvme_opts_config_json(w);

	pthread_mutex_lock(&g_bdev_nvme_mutex);

	TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
		nvme_ctrlr_config_json(w, nvme_ctrlr);
	}

	/* Dump this last, to give all NVMe bdevs a chance to be constructed
	 * before the hotplug poller is enabled.
	 */
	bdev_nvme_hotplug_config_json(w);

	pthread_mutex_unlock(&g_bdev_nvme_mutex);
	return 0;
}
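
/*
 * Look up the spdk_nvme_ctrlr backing a bdev. Returns NULL unless the bdev
 * was created by this module (checked against nvme_if). A minimal usage
 * sketch, assuming a bdev named "Nvme0n1" exists; spdk_bdev_get_by_name()
 * is part of the public bdev API:
 *
 *   struct spdk_bdev *bdev = spdk_bdev_get_by_name("Nvme0n1");
 *   struct spdk_nvme_ctrlr *ctrlr = bdev_nvme_get_ctrlr(bdev);
 *   if (ctrlr != NULL) {
 *           ... issue controller-level operations ...
 *   }
 */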
struct spdk_nvme_ctrlr *
bdev_nvme_get_ctrlr(struct spdk_bdev *bdev)
{
	if (!bdev || bdev->module != &nvme_if) {
		return NULL;
	}

	return SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk)->nvme_ns->ctrlr->ctrlr;
}
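
/* Register the "bdev_nvme" log component so debug logging for this module
 * can be toggled at runtime (e.g. via the log_set_flag RPC).
 */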
SPDK_LOG_REGISTER_COMPONENT(bdev_nvme)