/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation.
 * All rights reserved.
 *
 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/config.h"
#include "spdk/env.h"
#include "spdk/fd.h"
#include "spdk/nvme.h"
#include "spdk/vmd.h"
#include "spdk/queue.h"
#include "spdk/string.h"
#include "spdk/nvme_intel.h"
#include "spdk/histogram_data.h"
#include "spdk/endian.h"
#include "spdk/dif.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/likely.h"
#include "spdk/sock.h"
#include "spdk/zipf.h"

#ifdef SPDK_CONFIG_URING
#include <liburing.h>
#endif

#if HAVE_LIBAIO
#include <libaio.h>
#endif

struct ctrlr_entry {
	struct spdk_nvme_ctrlr			*ctrlr;
	enum spdk_nvme_transport_type		trtype;
	struct spdk_nvme_intel_rw_latency_page	*latency_page;

	struct spdk_nvme_qpair			**unused_qpairs;

	TAILQ_ENTRY(ctrlr_entry)		link;
	char					name[1024];
};
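
/* perf can drive plain NVMe namespaces as well as regular files through
 * Linux AIO or io_uring; entry_type records which backend a given target
 * uses so the generic I/O path can dispatch through the right fn_table.
 */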
enum entry_type {
	ENTRY_TYPE_NVME_NS,
	ENTRY_TYPE_AIO_FILE,
	ENTRY_TYPE_URING_FILE,
};

struct ns_fn_table;
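
/* Describes a single I/O target: an NVMe namespace or a file. The union
 * holds the backend-specific handles; everything else (sizes, protection
 * info settings, zipf state) is shared by all backends.
 */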
struct ns_entry {
	enum entry_type			type;
	const struct ns_fn_table	*fn_table;

	union {
		struct {
			struct spdk_nvme_ctrlr	*ctrlr;
			struct spdk_nvme_ns	*ns;
		} nvme;
#ifdef SPDK_CONFIG_URING
		struct {
			int			fd;
		} uring;
#endif
#if HAVE_LIBAIO
		struct {
			int			fd;
		} aio;
#endif
	} u;

	TAILQ_ENTRY(ns_entry)	link;
	uint32_t		io_size_blocks;
	uint32_t		num_io_requests;
	uint64_t		size_in_ios;
	uint32_t		block_size;
	uint32_t		md_size;
	bool			md_interleave;
	unsigned int		seed;
	struct spdk_zipf	*zipf;
	bool			pi_loc;
	enum spdk_nvme_pi_type	pi_type;
	uint32_t		io_flags;
	char			name[1024];
};

static const double g_latency_cutoffs[] = {
	0.01,
	0.10,
	0.25,
	0.50,
	0.75,
	0.90,
	0.95,
	0.98,
	0.99,
	0.995,
	0.999,
	0.9999,
	0.99999,
	0.999999,
	0.9999999,
	-1,
};

struct ns_worker_stats {
	uint64_t		io_submitted;
	uint64_t		io_completed;
	uint64_t		last_io_completed;
	uint64_t		total_tsc;
	uint64_t		min_tsc;
	uint64_t		max_tsc;
	uint64_t		last_tsc;
	uint64_t		busy_tsc;
	uint64_t		idle_tsc;
	uint64_t		last_busy_tsc;
	uint64_t		last_idle_tsc;
};

struct ns_worker_ctx {
	struct ns_entry		*entry;
	struct ns_worker_stats	stats;
	uint64_t		current_queue_depth;
	uint64_t		offset_in_ios;
	bool			is_draining;

	union {
		struct {
			int				num_active_qpairs;
			int				num_all_qpairs;
			struct spdk_nvme_qpair		**qpair;
			struct spdk_nvme_poll_group	*group;
			int				last_qpair;
		} nvme;

#ifdef SPDK_CONFIG_URING
		struct {
			struct io_uring		ring;
			uint64_t		io_inflight;
			uint64_t		io_pending;
			struct io_uring_cqe	**cqes;
		} uring;
#endif
#if HAVE_LIBAIO
		struct {
			struct io_event		*events;
			io_context_t		ctx;
		} aio;
#endif
	} u;

	TAILQ_ENTRY(ns_worker_ctx)	link;

	struct spdk_histogram_data	*histogram;
};

struct perf_task {
	struct ns_worker_ctx	*ns_ctx;
	struct iovec		*iovs; /* array of iovecs to transfer. */
	int			iovcnt; /* Number of iovecs in iovs array. */
	int			iovpos; /* Current iovec position. */
	uint32_t		iov_offset; /* Offset in current iovec. */
	struct iovec		md_iov;
	uint64_t		submit_tsc;
	bool			is_read;
	struct spdk_dif_ctx	dif_ctx;
#if HAVE_LIBAIO
	struct iocb		iocb;
#endif
};

struct worker_thread {
	TAILQ_HEAD(, ns_worker_ctx)	ns_ctx;
	TAILQ_ENTRY(worker_thread)	link;
	unsigned			lcore;
};
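
/* Per-backend dispatch table. The generic worker loop only ever touches a
 * target through these callbacks, so adding another I/O engine means filling
 * in one more ns_fn_table.
 */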
struct ns_fn_table {
	void	(*setup_payload)(struct perf_task *task, uint8_t pattern);

	int	(*submit_io)(struct perf_task *task, struct ns_worker_ctx *ns_ctx,
			     struct ns_entry *entry, uint64_t offset_in_ios);

	int64_t	(*check_io)(struct ns_worker_ctx *ns_ctx);

	void	(*verify_io)(struct perf_task *task, struct ns_entry *entry);

	int	(*init_ns_worker_ctx)(struct ns_worker_ctx *ns_ctx);

	void	(*cleanup_ns_worker_ctx)(struct ns_worker_ctx *ns_ctx);

	void	(*dump_transport_stats)(uint32_t lcore, struct ns_worker_ctx *ns_ctx);
};

static uint32_t g_io_unit_size = (UINT32_MAX & (~0x03));

static int g_outstanding_commands;

static bool g_latency_ssd_tracking_enable;
static int g_latency_sw_tracking_level;

static bool g_vmd;
static const char *g_workload_type;
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static int g_num_namespaces;
static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static int g_num_workers = 0;
static bool g_use_every_core = false;
static uint32_t g_main_core;
static pthread_barrier_t g_worker_sync_barrier;

static uint64_t g_tsc_rate;

static bool g_monitor_perf_cores = false;

static uint32_t g_io_align = 0x200;
static bool g_io_align_specified;
static uint32_t g_io_size_bytes;
static uint32_t g_max_io_md_size;
static uint32_t g_max_io_size_blocks;
static uint32_t g_metacfg_pract_flag;
static uint32_t g_metacfg_prchk_flags;
static int g_rw_percentage = -1;
static int g_is_random;
static uint32_t g_queue_depth;
static int g_nr_io_queues_per_ns = 1;
static int g_nr_unused_io_queues;
static int g_time_in_sec;
static uint64_t g_number_ios;
static uint64_t g_elapsed_time_in_usec;
static int g_warmup_time_in_sec;
static uint32_t g_max_completions;
static uint32_t g_disable_sq_cmb;
static bool g_use_uring;
static bool g_warn;
static bool g_header_digest;
static bool g_data_digest;
static bool g_no_shn_notification;
static bool g_mix_specified;
/* This flag is used to exit the program when keep alive fails on the transport. */
static bool g_exit;
/* Default to 10 seconds for the keep alive value. This value is arbitrary. */
static uint32_t g_keep_alive_timeout_in_ms = 10000;
static uint32_t g_quiet_count = 1;
static double g_zipf_theta;
/* Default io_queue_size to UINT16_MAX; the NVMe driver will then reduce this
 * to MQES, maximizing the io_queue_size as much as possible.
 */
static uint32_t g_io_queue_size = UINT16_MAX;

static uint32_t g_sock_zcopy_threshold;
static char *g_sock_threshold_impl;

static uint8_t g_transport_tos = 0;

static uint32_t g_rdma_srq_size;

/* When the user specifies -Q, some error messages are rate limited. When rate
 * limited, we only print the error message every g_quiet_count times the
 * error occurs.
 *
 * Note: __count is not thread safe, meaning the rate limiting will not
 * be exact when running perf with multiple threads and lots of errors.
 * A thread-local __count would mean rate-limiting per thread, which doesn't
 * seem as useful.
 */
#define RATELIMIT_LOG(...) \
	{ \
		static uint64_t __count = 0; \
		if ((__count % g_quiet_count) == 0) { \
			if (__count > 0 && g_quiet_count > 1) { \
				fprintf(stderr, "Message suppressed %" PRIu32 " times: ", \
					g_quiet_count - 1); \
			} \
			fprintf(stderr, __VA_ARGS__); \
		} \
		__count++; \
	}
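
/* Illustrative usage (hypothetical values): with -Q 100, a failing submit
 * path calling RATELIMIT_LOG("starting I/O failed\n") prints the message on
 * the 1st, 101st, 201st, ... occurrence, each time (after the first) noting
 * the 99 occurrences suppressed in between.
 */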

static bool g_dump_transport_stats;
static pthread_mutex_t g_stats_mutex;

#define MAX_ALLOWED_PCI_DEVICE_NUM 128
static struct spdk_pci_addr g_allowed_pci_addr[MAX_ALLOWED_PCI_DEVICE_NUM];

struct trid_entry {
	struct spdk_nvme_transport_id	trid;
	uint16_t			nsid;
	char				hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(trid_entry)		tailq;
};

static TAILQ_HEAD(, trid_entry) g_trid_list = TAILQ_HEAD_INITIALIZER(g_trid_list);

static int g_file_optind; /* Index of first filename in argv */

static inline void task_complete(struct perf_task *task);
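
/* Applies one key/value pair to the options of the named sock implementation
 * (e.g. "posix"): fetch the current opts, patch the requested field, and
 * write the whole struct back.
 */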
static void
perf_set_sock_opts(const char *impl_name, const char *field, uint32_t val, const char *valstr)
{
	struct spdk_sock_impl_opts sock_opts = {};
	size_t opts_size = sizeof(sock_opts);
	int rc;

	rc = spdk_sock_impl_get_opts(impl_name, &sock_opts, &opts_size);
	if (rc != 0) {
		if (errno == EINVAL) {
			fprintf(stderr, "Unknown sock impl %s\n", impl_name);
		} else {
			fprintf(stderr, "Failed to get opts for sock impl %s: error %d (%s)\n", impl_name, errno,
				strerror(errno));
		}
		return;
	}

	if (opts_size != sizeof(sock_opts)) {
		fprintf(stderr, "Warning: sock_opts size mismatch. Expected %zu, received %zu\n",
			sizeof(sock_opts), opts_size);
		opts_size = sizeof(sock_opts);
	}

	if (!field) {
		fprintf(stderr, "Warning: no socket opts field specified\n");
		return;
	} else if (strcmp(field, "enable_zerocopy_send_client") == 0) {
		sock_opts.enable_zerocopy_send_client = val;
	} else if (strcmp(field, "tls_version") == 0) {
		sock_opts.tls_version = val;
	} else if (strcmp(field, "ktls") == 0) {
		sock_opts.enable_ktls = val;
	} else if (strcmp(field, "psk_key") == 0) {
		if (!valstr) {
			fprintf(stderr, "No socket opts value specified\n");
			return;
		}
		sock_opts.psk_key = strdup(valstr);
		if (sock_opts.psk_key == NULL) {
			fprintf(stderr, "Failed to allocate psk_key in sock_impl\n");
			return;
		}
	} else if (strcmp(field, "psk_identity") == 0) {
		if (!valstr) {
			fprintf(stderr, "No socket opts value specified\n");
			return;
		}
		sock_opts.psk_identity = strdup(valstr);
		if (sock_opts.psk_identity == NULL) {
			fprintf(stderr, "Failed to allocate psk_identity in sock_impl\n");
			return;
		}
	} else if (strcmp(field, "zerocopy_threshold") == 0) {
		sock_opts.zerocopy_threshold = val;
	} else {
		fprintf(stderr, "Warning: invalid or unprocessed socket opts field: %s\n", field);
		return;
	}

	if (spdk_sock_impl_set_opts(impl_name, &sock_opts, opts_size)) {
		fprintf(stderr, "Failed to set %s: %d for sock impl %s : error %d (%s)\n", field, val, impl_name,
			errno, strerror(errno));
	}
}
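
/* SGL callbacks handed to the NVMe driver for multi-iovec I/O: reset_sgl
 * seeks to an absolute payload offset, next_sge returns the current iovec
 * segment and advances past it.
 */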
static void
nvme_perf_reset_sgl(void *ref, uint32_t sgl_offset)
{
	struct iovec *iov;
	struct perf_task *task = (struct perf_task *)ref;

	task->iov_offset = sgl_offset;
	for (task->iovpos = 0; task->iovpos < task->iovcnt; task->iovpos++) {
		iov = &task->iovs[task->iovpos];
		if (task->iov_offset < iov->iov_len) {
			break;
		}

		task->iov_offset -= iov->iov_len;
	}
}

static int
nvme_perf_next_sge(void *ref, void **address, uint32_t *length)
{
	struct iovec *iov;
	struct perf_task *task = (struct perf_task *)ref;

	assert(task->iovpos < task->iovcnt);

	iov = &task->iovs[task->iovpos];
	assert(task->iov_offset <= iov->iov_len);

	*address = iov->iov_base + task->iov_offset;
	*length = iov->iov_len - task->iov_offset;
	task->iovpos++;
	task->iov_offset = 0;

	return 0;
}

static int
nvme_perf_allocate_iovs(struct perf_task *task, void *buf, uint32_t length)
{
	int iovpos = 0;
	struct iovec *iov;
	uint32_t offset = 0;

	task->iovcnt = SPDK_CEIL_DIV(length, (uint64_t)g_io_unit_size);
	task->iovs = calloc(task->iovcnt, sizeof(struct iovec));
	if (!task->iovs) {
		return -1;
	}

	while (length > 0) {
		iov = &task->iovs[iovpos];
		iov->iov_len = spdk_min(length, g_io_unit_size);
		iov->iov_base = buf + offset;
		length -= iov->iov_len;
		offset += iov->iov_len;
		iovpos++;
	}

	return 0;
}
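
/* Worked example (hypothetical -O value): a 64 KiB payload with
 * g_io_unit_size of 16 KiB is split by nvme_perf_allocate_iovs into
 * SPDK_CEIL_DIV(65536, 16384) = 4 iovecs of 16 KiB each, all pointing into
 * the one contiguous buffer. With the default g_io_unit_size the payload
 * stays in a single iovec.
 */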

#ifdef SPDK_CONFIG_URING

static void
uring_setup_payload(struct perf_task *task, uint8_t pattern)
{
	struct iovec *iov;

	task->iovs = calloc(1, sizeof(struct iovec));
	if (!task->iovs) {
		fprintf(stderr, "perf task failed to allocate iovs\n");
		exit(1);
	}
	task->iovcnt = 1;

	iov = &task->iovs[0];
	iov->iov_base = spdk_dma_zmalloc(g_io_size_bytes, g_io_align, NULL);
	iov->iov_len = g_io_size_bytes;
	if (iov->iov_base == NULL) {
		fprintf(stderr, "spdk_dma_zmalloc() for task->iovs[0].iov_base failed\n");
		free(task->iovs);
		exit(1);
	}
	memset(iov->iov_base, pattern, iov->iov_len);
}

static int
uring_submit_io(struct perf_task *task, struct ns_worker_ctx *ns_ctx,
		struct ns_entry *entry, uint64_t offset_in_ios)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(&ns_ctx->u.uring.ring);
	if (!sqe) {
		fprintf(stderr, "Cannot get sqe\n");
		return -1;
	}

	if (task->is_read) {
		io_uring_prep_readv(sqe, entry->u.uring.fd, task->iovs, 1, offset_in_ios * task->iovs[0].iov_len);
	} else {
		io_uring_prep_writev(sqe, entry->u.uring.fd, task->iovs, 1, offset_in_ios * task->iovs[0].iov_len);
	}

	io_uring_sqe_set_data(sqe, task);
	ns_ctx->u.uring.io_pending++;

	return 0;
}
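
/* Drain point for the io_uring backend: flush everything queued by
 * uring_submit_io with one io_uring_submit() call, then reap however many
 * completions are already available, without blocking.
 */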
static int64_t
uring_check_io(struct ns_worker_ctx *ns_ctx)
{
	int i, to_complete, to_submit, count = 0, ret = 0;
	struct perf_task *task;

	to_submit = ns_ctx->u.uring.io_pending;

	if (to_submit > 0) {
		/* If there are I/O to submit, use io_uring_submit here.
		 * It will automatically call io_uring_enter appropriately. */
		ret = io_uring_submit(&ns_ctx->u.uring.ring);
		if (ret < 0) {
			return -1;
		}
		ns_ctx->u.uring.io_pending = 0;
		ns_ctx->u.uring.io_inflight += to_submit;
	}

	to_complete = ns_ctx->u.uring.io_inflight;
	if (to_complete > 0) {
		count = io_uring_peek_batch_cqe(&ns_ctx->u.uring.ring, ns_ctx->u.uring.cqes, to_complete);
		ns_ctx->u.uring.io_inflight -= count;
		for (i = 0; i < count; i++) {
			assert(ns_ctx->u.uring.cqes[i] != NULL);
			task = (struct perf_task *)ns_ctx->u.uring.cqes[i]->user_data;
			if (ns_ctx->u.uring.cqes[i]->res != (int)task->iovs[0].iov_len) {
				fprintf(stderr, "cqe->res=%d, iov_len=%d\n", ns_ctx->u.uring.cqes[i]->res,
					(int)task->iovs[0].iov_len);
				exit(1);
			}
			io_uring_cqe_seen(&ns_ctx->u.uring.ring, ns_ctx->u.uring.cqes[i]);
			task_complete(task);
		}
	}
	return count;
}

static void
uring_verify_io(struct perf_task *task, struct ns_entry *entry)
{
}

static int
uring_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	if (io_uring_queue_init(g_queue_depth, &ns_ctx->u.uring.ring, 0) < 0) {
		SPDK_ERRLOG("uring I/O context setup failure\n");
		return -1;
	}

	ns_ctx->u.uring.cqes = calloc(g_queue_depth, sizeof(struct io_uring_cqe *));
	if (!ns_ctx->u.uring.cqes) {
		io_uring_queue_exit(&ns_ctx->u.uring.ring);
		return -1;
	}

	return 0;
}

static void
uring_cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	io_uring_queue_exit(&ns_ctx->u.uring.ring);
	free(ns_ctx->u.uring.cqes);
}

static const struct ns_fn_table uring_fn_table = {
	.setup_payload = uring_setup_payload,
	.submit_io = uring_submit_io,
	.check_io = uring_check_io,
	.verify_io = uring_verify_io,
	.init_ns_worker_ctx = uring_init_ns_worker_ctx,
	.cleanup_ns_worker_ctx = uring_cleanup_ns_worker_ctx,
};

#endif

#ifdef HAVE_LIBAIO
static void
aio_setup_payload(struct perf_task *task, uint8_t pattern)
{
	struct iovec *iov;

	task->iovs = calloc(1, sizeof(struct iovec));
	if (!task->iovs) {
		fprintf(stderr, "perf task failed to allocate iovs\n");
		exit(1);
	}
	task->iovcnt = 1;

	iov = &task->iovs[0];
	iov->iov_base = spdk_dma_zmalloc(g_io_size_bytes, g_io_align, NULL);
	iov->iov_len = g_io_size_bytes;
	if (iov->iov_base == NULL) {
		fprintf(stderr, "spdk_dma_zmalloc() for task->iovs[0].iov_base failed\n");
		free(task->iovs);
		exit(1);
	}
	memset(iov->iov_base, pattern, iov->iov_len);
}
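
/* Fills in a libaio iocb by hand (equivalent to what io_prep_pread/pwrite
 * would do) and queues it with io_submit; the offset argument is an I/O
 * index, scaled to a byte offset by the iovec length.
 */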
static int
aio_submit(io_context_t aio_ctx, struct iocb *iocb, int fd, enum io_iocb_cmd cmd,
	   struct iovec *iov, uint64_t offset, void *cb_ctx)
{
	iocb->aio_fildes = fd;
	iocb->aio_reqprio = 0;
	iocb->aio_lio_opcode = cmd;
	iocb->u.c.buf = iov->iov_base;
	iocb->u.c.nbytes = iov->iov_len;
	iocb->u.c.offset = offset * iov->iov_len;
	iocb->data = cb_ctx;

	if (io_submit(aio_ctx, 1, &iocb) < 0) {
		perror("io_submit");
		return -1;
	}

	return 0;
}

static int
aio_submit_io(struct perf_task *task, struct ns_worker_ctx *ns_ctx,
	      struct ns_entry *entry, uint64_t offset_in_ios)
{
	if (task->is_read) {
		return aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD,
				  task->iovs, offset_in_ios, task);
	} else {
		return aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE,
				  task->iovs, offset_in_ios, task);
	}
}

static int64_t
aio_check_io(struct ns_worker_ctx *ns_ctx)
{
	int count, i;
	struct timespec timeout;
	struct perf_task *task;

	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;

	count = io_getevents(ns_ctx->u.aio.ctx, 1, g_queue_depth, ns_ctx->u.aio.events, &timeout);
	if (count < 0) {
		fprintf(stderr, "io_getevents error\n");
		exit(1);
	}

	for (i = 0; i < count; i++) {
		task = (struct perf_task *)ns_ctx->u.aio.events[i].data;
		if (ns_ctx->u.aio.events[i].res != (uint64_t)task->iovs[0].iov_len) {
			fprintf(stderr, "event->res=%" PRIu64 ", iov_len=%" PRIu64 "\n",
				(uint64_t)ns_ctx->u.aio.events[i].res,
				(uint64_t)task->iovs[0].iov_len);
			exit(1);
		}
		task_complete(task);
	}
	return count;
}

static void
aio_verify_io(struct perf_task *task, struct ns_entry *entry)
{
}

static int
aio_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	ns_ctx->u.aio.events = calloc(g_queue_depth, sizeof(struct io_event));
	if (!ns_ctx->u.aio.events) {
		return -1;
	}
	ns_ctx->u.aio.ctx = 0;
	if (io_setup(g_queue_depth, &ns_ctx->u.aio.ctx) < 0) {
		free(ns_ctx->u.aio.events);
		perror("io_setup");
		return -1;
	}
	return 0;
}

static void
aio_cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	io_destroy(ns_ctx->u.aio.ctx);
	free(ns_ctx->u.aio.events);
}

static const struct ns_fn_table aio_fn_table = {
	.setup_payload = aio_setup_payload,
	.submit_io = aio_submit_io,
	.check_io = aio_check_io,
	.verify_io = aio_verify_io,
	.init_ns_worker_ctx = aio_init_ns_worker_ctx,
	.cleanup_ns_worker_ctx = aio_cleanup_ns_worker_ctx,
};

#endif /* HAVE_LIBAIO */

#if defined(HAVE_LIBAIO) || defined(SPDK_CONFIG_URING)
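
/* Registers one file or block device for the AIO/io_uring backends. O_DIRECT
 * is always used, so the I/O buffer alignment must be at least the device
 * block size; a user-specified alignment smaller than that is rejected.
 */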
static int
register_file(const char *path)
{
	struct ns_entry *entry;

	int flags, fd;
	uint64_t size;
	uint32_t blklen;

	if (g_rw_percentage == 100) {
		flags = O_RDONLY;
	} else if (g_rw_percentage == 0) {
		flags = O_WRONLY;
	} else {
		flags = O_RDWR;
	}

	flags |= O_DIRECT;

	fd = open(path, flags);
	if (fd < 0) {
		fprintf(stderr, "Could not open device %s: %s\n", path, strerror(errno));
		return -1;
	}

	size = spdk_fd_get_size(fd);
	if (size == 0) {
		fprintf(stderr, "Could not determine size of device %s\n", path);
		close(fd);
		return -1;
	}

	blklen = spdk_fd_get_blocklen(fd);
	if (blklen == 0) {
		fprintf(stderr, "Could not determine block size of device %s\n", path);
		close(fd);
		return -1;
	}

	/*
	 * TODO: This should really calculate the LCM of the current g_io_align and blklen.
	 * For now, it's fairly safe to just assume all block sizes are powers of 2.
	 */
	if (g_io_align < blklen) {
		if (g_io_align_specified) {
			fprintf(stderr, "Wrong IO alignment (%u). aio requires block-sized alignment (%u)\n", g_io_align,
				blklen);
			close(fd);
			return -1;
		}

		g_io_align = blklen;
	}

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		close(fd);
		perror("ns_entry malloc");
		return -1;
	}

	if (g_use_uring) {
#ifdef SPDK_CONFIG_URING
		entry->type = ENTRY_TYPE_URING_FILE;
		entry->fn_table = &uring_fn_table;
		entry->u.uring.fd = fd;
#endif
	} else {
#if HAVE_LIBAIO
		entry->type = ENTRY_TYPE_AIO_FILE;
		entry->fn_table = &aio_fn_table;
		entry->u.aio.fd = fd;
#endif
	}
	entry->size_in_ios = size / g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / blklen;

	if (g_is_random) {
		entry->seed = rand();
		if (g_zipf_theta > 0) {
			entry->zipf = spdk_zipf_create(entry->size_in_ios, g_zipf_theta, 0);
		}
	}

	snprintf(entry->name, sizeof(entry->name), "%s", path);

	g_num_namespaces++;
	TAILQ_INSERT_TAIL(&g_namespaces, entry, link);

	return 0;
}

static int
register_files(int argc, char **argv)
{
	int i;

	/* Treat everything after the options as files for AIO/URING */
	for (i = g_file_optind; i < argc; i++) {
		if (register_file(argv[i]) != 0) {
			return 1;
		}
	}

	return 0;
}
#endif

static void io_complete(void *ctx, const struct spdk_nvme_cpl *cpl);

static void
nvme_setup_payload(struct perf_task *task, uint8_t pattern)
{
	uint32_t max_io_size_bytes, max_io_md_size;
	void *buf;
	int rc;

	/* Maximum extended LBA format size among all active namespaces;
	 * it is the same as g_io_size_bytes for namespaces without metadata.
	 */
	max_io_size_bytes = g_io_size_bytes + g_max_io_md_size * g_max_io_size_blocks;
	buf = spdk_dma_zmalloc(max_io_size_bytes, g_io_align, NULL);
	if (buf == NULL) {
		fprintf(stderr, "task->buf spdk_dma_zmalloc failed\n");
		exit(1);
	}
	memset(buf, pattern, max_io_size_bytes);

	rc = nvme_perf_allocate_iovs(task, buf, max_io_size_bytes);
	if (rc < 0) {
		fprintf(stderr, "perf task failed to allocate iovs\n");
		spdk_dma_free(buf);
		exit(1);
	}

	max_io_md_size = g_max_io_md_size * g_max_io_size_blocks;
	if (max_io_md_size != 0) {
		task->md_iov.iov_base = spdk_dma_zmalloc(max_io_md_size, g_io_align, NULL);
		task->md_iov.iov_len = max_io_md_size;
		if (task->md_iov.iov_base == NULL) {
			fprintf(stderr, "task->md_buf spdk_dma_zmalloc failed\n");
			spdk_dma_free(task->iovs[0].iov_base);
			free(task->iovs);
			exit(1);
		}
	}
}
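
/* DIF covers interleaved metadata (protection info embedded in each extended
 * block); DIX covers separate metadata carried in md_iov. Either is only
 * generated/verified in software here when the controller is not doing it
 * for us, i.e. when SPDK_NVME_IO_FLAGS_PRACT is unset.
 */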
static int
nvme_submit_io(struct perf_task *task, struct ns_worker_ctx *ns_ctx,
	       struct ns_entry *entry, uint64_t offset_in_ios)
{
	uint64_t lba;
	int rc;
	int qp_num;

	enum dif_mode {
		DIF_MODE_NONE = 0,
		DIF_MODE_DIF = 1,
		DIF_MODE_DIX = 2,
	} mode = DIF_MODE_NONE;

	lba = offset_in_ios * entry->io_size_blocks;

	if (entry->md_size != 0 && !(entry->io_flags & SPDK_NVME_IO_FLAGS_PRACT)) {
		if (entry->md_interleave) {
			mode = DIF_MODE_DIF;
		} else {
			mode = DIF_MODE_DIX;
		}
	}

	qp_num = ns_ctx->u.nvme.last_qpair;
	ns_ctx->u.nvme.last_qpair++;
	if (ns_ctx->u.nvme.last_qpair == ns_ctx->u.nvme.num_active_qpairs) {
		ns_ctx->u.nvme.last_qpair = 0;
	}

	if (mode != DIF_MODE_NONE) {
		rc = spdk_dif_ctx_init(&task->dif_ctx, entry->block_size, entry->md_size,
				       entry->md_interleave, entry->pi_loc,
				       (enum spdk_dif_type)entry->pi_type, entry->io_flags,
				       lba, 0xFFFF, (uint16_t)entry->io_size_blocks, 0, 0);
		if (rc != 0) {
			fprintf(stderr, "Initialization of DIF context failed\n");
			exit(1);
		}
	}

	if (task->is_read) {
		if (task->iovcnt == 1) {
			return spdk_nvme_ns_cmd_read_with_md(entry->u.nvme.ns, ns_ctx->u.nvme.qpair[qp_num],
							     task->iovs[0].iov_base, task->md_iov.iov_base,
							     lba,
							     entry->io_size_blocks, io_complete,
							     task, entry->io_flags,
							     task->dif_ctx.apptag_mask, task->dif_ctx.app_tag);
		} else {
			return spdk_nvme_ns_cmd_readv_with_md(entry->u.nvme.ns, ns_ctx->u.nvme.qpair[qp_num],
							      lba, entry->io_size_blocks,
							      io_complete, task, entry->io_flags,
							      nvme_perf_reset_sgl, nvme_perf_next_sge,
							      task->md_iov.iov_base,
							      task->dif_ctx.apptag_mask, task->dif_ctx.app_tag);
		}
	} else {
		switch (mode) {
		case DIF_MODE_DIF:
			rc = spdk_dif_generate(task->iovs, task->iovcnt, entry->io_size_blocks, &task->dif_ctx);
			if (rc != 0) {
				fprintf(stderr, "Generation of DIF failed\n");
				return rc;
			}
			break;
		case DIF_MODE_DIX:
			rc = spdk_dix_generate(task->iovs, task->iovcnt, &task->md_iov, entry->io_size_blocks,
					       &task->dif_ctx);
			if (rc != 0) {
				fprintf(stderr, "Generation of DIX failed\n");
				return rc;
			}
			break;
		default:
			break;
		}

		if (task->iovcnt == 1) {
			return spdk_nvme_ns_cmd_write_with_md(entry->u.nvme.ns, ns_ctx->u.nvme.qpair[qp_num],
							      task->iovs[0].iov_base, task->md_iov.iov_base,
							      lba,
							      entry->io_size_blocks, io_complete,
							      task, entry->io_flags,
							      task->dif_ctx.apptag_mask, task->dif_ctx.app_tag);
		} else {
			return spdk_nvme_ns_cmd_writev_with_md(entry->u.nvme.ns, ns_ctx->u.nvme.qpair[qp_num],
							       lba, entry->io_size_blocks,
							       io_complete, task, entry->io_flags,
							       nvme_perf_reset_sgl, nvme_perf_next_sge,
							       task->md_iov.iov_base,
							       task->dif_ctx.apptag_mask, task->dif_ctx.app_tag);
		}
	}
}

static void
perf_disconnect_cb(struct spdk_nvme_qpair *qpair, void *ctx)
{

}

static int64_t
nvme_check_io(struct ns_worker_ctx *ns_ctx)
{
	int64_t rc;

	rc = spdk_nvme_poll_group_process_completions(ns_ctx->u.nvme.group, g_max_completions,
			perf_disconnect_cb);
	if (rc < 0) {
		fprintf(stderr, "NVMe io qpair process completion error\n");
		exit(1);
	}
	return rc;
}

static void
nvme_verify_io(struct perf_task *task, struct ns_entry *entry)
{
	struct spdk_dif_error err_blk = {};
	int rc;

	if (!task->is_read || (entry->io_flags & SPDK_NVME_IO_FLAGS_PRACT)) {
		return;
	}

	if (entry->md_interleave) {
		rc = spdk_dif_verify(task->iovs, task->iovcnt, entry->io_size_blocks, &task->dif_ctx,
				     &err_blk);
		if (rc != 0) {
			fprintf(stderr, "DIF error detected. type=%d, offset=%" PRIu32 "\n",
				err_blk.err_type, err_blk.err_offset);
		}
	} else {
		rc = spdk_dix_verify(task->iovs, task->iovcnt, &task->md_iov, entry->io_size_blocks,
				     &task->dif_ctx, &err_blk);
		if (rc != 0) {
			fprintf(stderr, "DIX error detected. type=%d, offset=%" PRIu32 "\n",
				err_blk.err_type, err_blk.err_offset);
		}
	}
}
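
/* Queue pairs are created with create_only, added to the thread's poll
 * group, and only then connected, so the poll group owns the connection
 * state from the start. Extra, never-used qpairs (g_nr_unused_io_queues)
 * go through the same path to model the cost of idle queues.
 */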

/*
 * TODO: If a controller has multiple namespaces, they could all use the same queue.
 * For now, give each namespace/thread combination its own queue.
 */
static int
nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct ns_entry *entry = ns_ctx->entry;
	struct spdk_nvme_poll_group *group;
	struct spdk_nvme_qpair *qpair;
	int i;

	ns_ctx->u.nvme.num_active_qpairs = g_nr_io_queues_per_ns;
	ns_ctx->u.nvme.num_all_qpairs = g_nr_io_queues_per_ns + g_nr_unused_io_queues;
	ns_ctx->u.nvme.qpair = calloc(ns_ctx->u.nvme.num_all_qpairs, sizeof(struct spdk_nvme_qpair *));
	if (!ns_ctx->u.nvme.qpair) {
		return -1;
	}

	spdk_nvme_ctrlr_get_default_io_qpair_opts(entry->u.nvme.ctrlr, &opts, sizeof(opts));
	if (opts.io_queue_requests < entry->num_io_requests) {
		opts.io_queue_requests = entry->num_io_requests;
	}
	opts.delay_cmd_submit = true;
	opts.create_only = true;

	ns_ctx->u.nvme.group = spdk_nvme_poll_group_create(NULL, NULL);
	if (ns_ctx->u.nvme.group == NULL) {
		goto poll_group_failed;
	}

	group = ns_ctx->u.nvme.group;
	for (i = 0; i < ns_ctx->u.nvme.num_all_qpairs; i++) {
		ns_ctx->u.nvme.qpair[i] = spdk_nvme_ctrlr_alloc_io_qpair(entry->u.nvme.ctrlr, &opts,
					  sizeof(opts));
		qpair = ns_ctx->u.nvme.qpair[i];
		if (!qpair) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
			goto qpair_failed;
		}

		if (spdk_nvme_poll_group_add(group, qpair)) {
			printf("ERROR: unable to add I/O qpair to poll group.\n");
			spdk_nvme_ctrlr_free_io_qpair(qpair);
			goto qpair_failed;
		}

		if (spdk_nvme_ctrlr_connect_io_qpair(entry->u.nvme.ctrlr, qpair)) {
			printf("ERROR: unable to connect I/O qpair.\n");
			spdk_nvme_ctrlr_free_io_qpair(qpair);
			goto qpair_failed;
		}
	}

	return 0;

qpair_failed:
	for (; i > 0; --i) {
		spdk_nvme_ctrlr_free_io_qpair(ns_ctx->u.nvme.qpair[i - 1]);
	}

	spdk_nvme_poll_group_destroy(ns_ctx->u.nvme.group);
poll_group_failed:
	free(ns_ctx->u.nvme.qpair);
	return -1;
}

static void
nvme_cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
	int i;

	for (i = 0; i < ns_ctx->u.nvme.num_all_qpairs; i++) {
		spdk_nvme_ctrlr_free_io_qpair(ns_ctx->u.nvme.qpair[i]);
	}

	spdk_nvme_poll_group_destroy(ns_ctx->u.nvme.group);
	free(ns_ctx->u.nvme.qpair);
}

static void
nvme_dump_rdma_statistics(struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_rdma_device_stat *device_stats;
	uint32_t i;

	printf("RDMA transport:\n");
	for (i = 0; i < stat->rdma.num_devices; i++) {
		device_stats = &stat->rdma.device_stats[i];
		printf("\tdev name: %s\n", device_stats->name);
		printf("\tpolls: %"PRIu64"\n", device_stats->polls);
		printf("\tidle_polls: %"PRIu64"\n", device_stats->idle_polls);
		printf("\tcompletions: %"PRIu64"\n", device_stats->completions);
		printf("\tqueued_requests: %"PRIu64"\n", device_stats->queued_requests);
		printf("\ttotal_send_wrs: %"PRIu64"\n", device_stats->total_send_wrs);
		printf("\tsend_doorbell_updates: %"PRIu64"\n", device_stats->send_doorbell_updates);
		printf("\ttotal_recv_wrs: %"PRIu64"\n", device_stats->total_recv_wrs);
		printf("\trecv_doorbell_updates: %"PRIu64"\n", device_stats->recv_doorbell_updates);
		printf("\t---------------------------------\n");
	}
}

static void
nvme_dump_pcie_statistics(struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_pcie_stat *pcie_stat;

	pcie_stat = &stat->pcie;

	printf("PCIE transport:\n");
	printf("\tpolls: %"PRIu64"\n", pcie_stat->polls);
	printf("\tidle_polls: %"PRIu64"\n", pcie_stat->idle_polls);
	printf("\tcompletions: %"PRIu64"\n", pcie_stat->completions);
	printf("\tcq_mmio_doorbell_updates: %"PRIu64"\n", pcie_stat->cq_mmio_doorbell_updates);
	printf("\tcq_shadow_doorbell_updates: %"PRIu64"\n", pcie_stat->cq_shadow_doorbell_updates);
	printf("\tsubmitted_requests: %"PRIu64"\n", pcie_stat->submitted_requests);
	printf("\tsq_mmio_doorbell_updates: %"PRIu64"\n", pcie_stat->sq_mmio_doorbell_updates);
	printf("\tsq_shadow_doorbell_updates: %"PRIu64"\n", pcie_stat->sq_shadow_doorbell_updates);
	printf("\tqueued_requests: %"PRIu64"\n", pcie_stat->queued_requests);
}

static void
nvme_dump_tcp_statistics(struct spdk_nvme_transport_poll_group_stat *stat)
{
	struct spdk_nvme_tcp_stat *tcp_stat;

	tcp_stat = &stat->tcp;

	printf("TCP transport:\n");
	printf("\tpolls: %"PRIu64"\n", tcp_stat->polls);
	printf("\tidle_polls: %"PRIu64"\n", tcp_stat->idle_polls);
	printf("\tsock_completions: %"PRIu64"\n", tcp_stat->socket_completions);
	printf("\tnvme_completions: %"PRIu64"\n", tcp_stat->nvme_completions);
	printf("\tsubmitted_requests: %"PRIu64"\n", tcp_stat->submitted_requests);
	printf("\tqueued_requests: %"PRIu64"\n", tcp_stat->queued_requests);
}
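
/* Pulls per-poll-group statistics from the driver and prints the section
 * matching each transport type; ownership of the stat object stays with the
 * driver, hence the spdk_nvme_poll_group_free_stats() at the end.
 */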
static void
nvme_dump_transport_stats(uint32_t lcore, struct ns_worker_ctx *ns_ctx)
{
	struct spdk_nvme_poll_group *group;
	struct spdk_nvme_poll_group_stat *stat = NULL;
	uint32_t i;
	int rc;

	group = ns_ctx->u.nvme.group;
	if (group == NULL) {
		return;
	}

	rc = spdk_nvme_poll_group_get_stats(group, &stat);
	if (rc) {
		fprintf(stderr, "Can't get transport stats, error %d\n", rc);
		return;
	}

	printf("\n====================\n");
	printf("lcore %u, ns %s statistics:\n", lcore, ns_ctx->entry->name);

	for (i = 0; i < stat->num_transports; i++) {
		switch (stat->transport_stat[i]->trtype) {
		case SPDK_NVME_TRANSPORT_RDMA:
			nvme_dump_rdma_statistics(stat->transport_stat[i]);
			break;
		case SPDK_NVME_TRANSPORT_PCIE:
			nvme_dump_pcie_statistics(stat->transport_stat[i]);
			break;
		case SPDK_NVME_TRANSPORT_TCP:
			nvme_dump_tcp_statistics(stat->transport_stat[i]);
			break;
		default:
			fprintf(stderr, "Unknown transport statistics %d %s\n", stat->transport_stat[i]->trtype,
				spdk_nvme_transport_id_trtype_str(stat->transport_stat[i]->trtype));
		}
	}

	spdk_nvme_poll_group_free_stats(group, stat);
}

static const struct ns_fn_table nvme_fn_table = {
	.setup_payload = nvme_setup_payload,
	.submit_io = nvme_submit_io,
	.check_io = nvme_check_io,
	.verify_io = nvme_verify_io,
	.init_ns_worker_ctx = nvme_init_ns_worker_ctx,
	.cleanup_ns_worker_ctx = nvme_cleanup_ns_worker_ctx,
	.dump_transport_stats = nvme_dump_transport_stats
};

static int
build_nvme_name(char *name, size_t length, struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport_id *trid;
	int res = 0;

	trid = spdk_nvme_ctrlr_get_transport_id(ctrlr);

	switch (trid->trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		res = snprintf(name, length, "PCIE (%s)", trid->traddr);
		break;
	case SPDK_NVME_TRANSPORT_RDMA:
		res = snprintf(name, length, "RDMA (addr:%s subnqn:%s)", trid->traddr, trid->subnqn);
		break;
	case SPDK_NVME_TRANSPORT_TCP:
		res = snprintf(name, length, "TCP (addr:%s subnqn:%s)", trid->traddr, trid->subnqn);
		break;
	case SPDK_NVME_TRANSPORT_VFIOUSER:
		res = snprintf(name, length, "VFIOUSER (%s)", trid->traddr);
		break;
	case SPDK_NVME_TRANSPORT_CUSTOM:
		res = snprintf(name, length, "CUSTOM (%s)", trid->traddr);
		break;

	default:
		fprintf(stderr, "Unknown transport type %d\n", trid->trtype);
		break;
	}
	return res;
}

static void
build_nvme_ns_name(char *name, size_t length, struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	int res = 0;

	res = build_nvme_name(name, length, ctrlr);
	if (res > 0) {
		snprintf(name + res, length - res, " NSID %u", nsid);
	}
}
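
/* Sanity-checks a namespace against the requested workload (size, sector
 * size, metadata) before adding it to g_namespaces. For example, -o 4096
 * against a 512-byte-sector namespace yields io_size_blocks = 8, and the
 * per-qpair request count is scaled so g_queue_depth I/Os can stay
 * outstanding even when each splits into multiple driver requests.
 */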
static void
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
{
	struct ns_entry *entry;
	const struct spdk_nvme_ctrlr_data *cdata;
	uint32_t max_xfer_size, entries, sector_size;
	uint64_t ns_size;
	struct spdk_nvme_io_qpair_opts opts;

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (!spdk_nvme_ns_is_active(ns)) {
		printf("Controller %-20.20s (%-20.20s): Skipping inactive NS %u\n",
		       cdata->mn, cdata->sn,
		       spdk_nvme_ns_get_id(ns));
		g_warn = true;
		return;
	}

	ns_size = spdk_nvme_ns_get_size(ns);
	sector_size = spdk_nvme_ns_get_sector_size(ns);

	if (ns_size < g_io_size_bytes || sector_size > g_io_size_bytes) {
		printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
		       "ns size %" PRIu64 " / block size %u for I/O size %u\n",
		       cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
		       ns_size, spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
		g_warn = true;
		return;
	}

	max_xfer_size = spdk_nvme_ns_get_max_io_xfer_size(ns);
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	/* The NVMe driver may add additional entries based on
	 * stripe size and maximum transfer size; we assume
	 * one more entry is used for the stripe.
	 */
	entries = (g_io_size_bytes - 1) / max_xfer_size + 2;
	if ((g_queue_depth * entries) > opts.io_queue_size) {
		printf("controller IO queue size %u is less than required\n",
		       opts.io_queue_size);
		printf("Consider using a lower queue depth or smaller IO size, because "
		       "IO requests may be queued at the NVMe driver.\n");
	}
	/* For requests that have child requests, the parent request itself
	 * also occupies one entry.
	 */
	entries += 1;

	entry = calloc(1, sizeof(struct ns_entry));
	if (entry == NULL) {
		perror("ns_entry malloc");
		exit(1);
	}

	entry->type = ENTRY_TYPE_NVME_NS;
	entry->fn_table = &nvme_fn_table;
	entry->u.nvme.ctrlr = ctrlr;
	entry->u.nvme.ns = ns;
	entry->num_io_requests = g_queue_depth * entries;

	entry->size_in_ios = ns_size / g_io_size_bytes;
	entry->io_size_blocks = g_io_size_bytes / sector_size;

	if (g_is_random) {
		entry->seed = rand();
		if (g_zipf_theta > 0) {
			entry->zipf = spdk_zipf_create(entry->size_in_ios, g_zipf_theta, 0);
		}
	}

	entry->block_size = spdk_nvme_ns_get_extended_sector_size(ns);
	entry->md_size = spdk_nvme_ns_get_md_size(ns);
	entry->md_interleave = spdk_nvme_ns_supports_extended_lba(ns);
	entry->pi_loc = spdk_nvme_ns_get_data(ns)->dps.md_start;
	entry->pi_type = spdk_nvme_ns_get_pi_type(ns);

	if (spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		entry->io_flags = g_metacfg_pract_flag | g_metacfg_prchk_flags;
	}

	/* If metadata size = 8 bytes, PI is stripped (read) or inserted (write),
	 * so remove the metadata size from the block size. (If metadata size > 8
	 * bytes, PI is passed (read) or replaced (write), so the block size does
	 * not need to change.)
	 */
	if ((entry->io_flags & SPDK_NVME_IO_FLAGS_PRACT) && (entry->md_size == 8)) {
		entry->block_size = spdk_nvme_ns_get_sector_size(ns);
	}

	if (g_io_size_bytes % entry->block_size != 0) {
		printf("WARNING: IO size %u (-o) is not a multiple of nsid %u sector size %u."
		       " Removing this ns from test\n", g_io_size_bytes, spdk_nvme_ns_get_id(ns), entry->block_size);
		g_warn = true;
		spdk_zipf_free(&entry->zipf);
		free(entry);
		return;
	}

	if (g_max_io_md_size < entry->md_size) {
		g_max_io_md_size = entry->md_size;
	}

	if (g_max_io_size_blocks < entry->io_size_blocks) {
		g_max_io_size_blocks = entry->io_size_blocks;
	}

	build_nvme_ns_name(entry->name, sizeof(entry->name), ctrlr, spdk_nvme_ns_get_id(ns));

	g_num_namespaces++;
	TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
}

static void
unregister_namespaces(void)
{
	struct ns_entry *entry, *tmp;

	TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
		TAILQ_REMOVE(&g_namespaces, entry, link);
		spdk_zipf_free(&entry->zipf);
		/* Only file-backed entries own a file descriptor; NVMe
		 * namespace entries carry pointers in the same union, so
		 * check the entry type before closing.
		 */
		if (entry->type == ENTRY_TYPE_URING_FILE) {
#ifdef SPDK_CONFIG_URING
			close(entry->u.uring.fd);
#endif
		} else if (entry->type == ENTRY_TYPE_AIO_FILE) {
#if HAVE_LIBAIO
			close(entry->u.aio.fd);
#endif
		}
		free(entry);
	}
}

static void
enable_latency_tracking_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		printf("enable_latency_tracking_complete failed\n");
	}
	g_outstanding_commands--;
}

static void
set_latency_tracking_feature(struct spdk_nvme_ctrlr *ctrlr, bool enable)
{
	int res;
	union spdk_nvme_intel_feat_latency_tracking latency_tracking;

	if (enable) {
		latency_tracking.bits.enable = 0x01;
	} else {
		latency_tracking.bits.enable = 0x00;
	}

	res = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING,
					      latency_tracking.raw, 0, NULL, 0, enable_latency_tracking_complete, NULL);
	if (res) {
		printf("failed to allocate nvme request.\n");
		return;
	}
	g_outstanding_commands++;

	while (g_outstanding_commands) {
		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	}
}
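
/* Adds a newly attached controller to g_controllers and registers either the
 * one namespace named on the command line (nsid != 0) or every active
 * namespace on the controller.
 */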
static void
|
|
|
|
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
|
|
|
|
{
|
|
|
|
struct spdk_nvme_ns *ns;
|
|
|
|
struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
|
|
|
|
uint32_t nsid;
|
|
|
|
|
|
|
|
if (entry == NULL) {
|
|
|
|
perror("ctrlr_entry malloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
entry->latency_page = spdk_dma_zmalloc(sizeof(struct spdk_nvme_intel_rw_latency_page),
|
|
|
|
4096, NULL);
|
|
|
|
if (entry->latency_page == NULL) {
|
|
|
|
printf("Allocation error (latency page)\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
2019-05-24 22:04:32 +00:00
|
|
|
build_nvme_name(entry->name, sizeof(entry->name), ctrlr);
|
2019-01-15 23:03:38 +00:00
|
|
|
|
|
|
|
entry->ctrlr = ctrlr;
|
2019-01-24 06:56:49 +00:00
|
|
|
entry->trtype = trid_entry->trid.trtype;
|
2020-09-28 00:40:05 +00:00
|
|
|
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
|
2019-01-15 23:03:38 +00:00
|
|
|
|
|
|
|
if (g_latency_ssd_tracking_enable &&
|
|
|
|
spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
|
|
|
|
set_latency_tracking_feature(ctrlr, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (trid_entry->nsid == 0) {
|
|
|
|
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
|
|
|
|
nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
|
|
|
|
ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
|
|
|
|
if (ns == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
register_ns(ctrlr, ns);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ns = spdk_nvme_ctrlr_get_ns(ctrlr, trid_entry->nsid);
|
|
|
|
if (!ns) {
|
|
|
|
perror("Namespace does not exist.");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
register_ns(ctrlr, ns);
|
|
|
|
}
|
|
|
|
}
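/*
 * Pick the next offset (zipf, uniform random, or sequential with wraparound)
 * and decide read vs. write from the -M mix: with -M 70, an I/O becomes a
 * read when rand_r(&entry->seed) % 100 < 70, i.e. roughly 70% of the time.
 * On submit failure the task and its buffers are freed immediately; on
 * success the context's queue depth and submitted-I/O count are bumped.
 */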
static inline void
submit_single_io(struct perf_task *task)
{
    uint64_t offset_in_ios;
    int rc;
    struct ns_worker_ctx *ns_ctx = task->ns_ctx;
    struct ns_entry *entry = ns_ctx->entry;

    assert(!ns_ctx->is_draining);

    if (entry->zipf) {
        offset_in_ios = spdk_zipf_generate(entry->zipf);
    } else if (g_is_random) {
        offset_in_ios = rand_r(&entry->seed) % entry->size_in_ios;
    } else {
        offset_in_ios = ns_ctx->offset_in_ios++;
        if (ns_ctx->offset_in_ios == entry->size_in_ios) {
            ns_ctx->offset_in_ios = 0;
        }
    }

    task->submit_tsc = spdk_get_ticks();

    if ((g_rw_percentage == 100) ||
        (g_rw_percentage != 0 && ((rand_r(&entry->seed) % 100) < g_rw_percentage))) {
        task->is_read = true;
    } else {
        task->is_read = false;
    }

    rc = entry->fn_table->submit_io(task, ns_ctx, entry, offset_in_ios);

    if (spdk_unlikely(rc != 0)) {
        RATELIMIT_LOG("starting I/O failed\n");
        spdk_dma_free(task->iovs[0].iov_base);
        free(task->iovs);
        spdk_dma_free(task->md_iov.iov_base);
        free(task);
    } else {
        ns_ctx->current_queue_depth++;
        ns_ctx->stats.io_submitted++;
    }

    if (spdk_unlikely(g_number_ios && ns_ctx->stats.io_submitted >= g_number_ios)) {
        ns_ctx->is_draining = true;
    }
}

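/*
 * Completion-side accounting. Latency is kept in raw TSC ticks here and only
 * converted to microseconds at report time (tsc * 1000 * 1000 / g_tsc_rate,
 * see print_performance()), which keeps divisions out of the completion
 * path. Unless the context is draining, the finished task is immediately
 * recycled into a new submission to hold the queue depth constant.
 */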
static inline void
task_complete(struct perf_task *task)
{
    struct ns_worker_ctx *ns_ctx;
    uint64_t tsc_diff;
    struct ns_entry *entry;

    ns_ctx = task->ns_ctx;
    entry = ns_ctx->entry;
    ns_ctx->current_queue_depth--;
    ns_ctx->stats.io_completed++;
    tsc_diff = spdk_get_ticks() - task->submit_tsc;
    ns_ctx->stats.total_tsc += tsc_diff;
    if (spdk_unlikely(ns_ctx->stats.min_tsc > tsc_diff)) {
        ns_ctx->stats.min_tsc = tsc_diff;
    }
    if (spdk_unlikely(ns_ctx->stats.max_tsc < tsc_diff)) {
        ns_ctx->stats.max_tsc = tsc_diff;
    }
    if (spdk_unlikely(g_latency_sw_tracking_level > 0)) {
        spdk_histogram_data_tally(ns_ctx->histogram, tsc_diff);
    }

    if (spdk_unlikely(entry->md_size > 0)) {
        /* add application level verification for end-to-end data protection */
        entry->fn_table->verify_io(task, entry);
    }

    /*
     * is_draining indicates when time has expired or io_submitted exceeded
     * g_number_ios for the test run and we are just waiting for the previously
     * submitted I/O to complete. In this case, do not submit a new I/O to
     * replace the one just completed.
     */
    if (spdk_unlikely(ns_ctx->is_draining)) {
        spdk_dma_free(task->iovs[0].iov_base);
        free(task->iovs);
        spdk_dma_free(task->md_iov.iov_base);
        free(task);
    } else {
        submit_single_io(task);
    }
}

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
    struct perf_task *task = ctx;

    if (spdk_unlikely(spdk_nvme_cpl_is_error(cpl))) {
        if (task->is_read) {
            RATELIMIT_LOG("Read completed with error (sct=%d, sc=%d)\n",
                          cpl->status.sct, cpl->status.sc);
        } else {
            RATELIMIT_LOG("Write completed with error (sct=%d, sc=%d)\n",
                          cpl->status.sct, cpl->status.sc);
        }
        if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
            cpl->status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT) {
            /* The namespace was hotplugged. Stop trying to send I/O to it. */
            task->ns_ctx->is_draining = true;
        }
    }

    task_complete(task);
}

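/*
 * Each task carries a small per-task value, queue_depth % 8 + 1 (i.e. a
 * value in 1..8), which the backend's setup_payload() uses when initializing
 * the task's payload buffers.
 */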
static struct perf_task *
allocate_task(struct ns_worker_ctx *ns_ctx, int queue_depth)
{
    struct perf_task *task;

    task = calloc(1, sizeof(*task));
    if (task == NULL) {
        fprintf(stderr, "Out of memory allocating tasks\n");
        exit(1);
    }

    ns_ctx->entry->fn_table->setup_payload(task, queue_depth % 8 + 1);

    task->ns_ctx = ns_ctx;

    return task;
}

static void
submit_io(struct ns_worker_ctx *ns_ctx, int queue_depth)
{
    struct perf_task *task;

    while (queue_depth-- > 0) {
        task = allocate_task(ns_ctx, queue_depth);
        submit_single_io(task);
    }
}

static int
init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
    return ns_ctx->entry->fn_table->init_ns_worker_ctx(ns_ctx);
}

static void
cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
    ns_ctx->entry->fn_table->cleanup_ns_worker_ctx(ns_ctx);
}

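/*
 * Printed once per second by the main core: aggregate IOPS across all
 * workers and MiB/s = IOPS * g_io_size_bytes / (1024 * 1024). With -m, core
 * utilization is also shown as busy_tsc / (busy_tsc + idle_tsc) * 100,
 * accumulated over every ns context since the previous sample.
 */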
static void
print_periodic_performance(bool warmup)
{
    uint64_t io_this_second;
    double mb_this_second;
    struct worker_thread *worker;
    struct ns_worker_ctx *ns_ctx;
    uint64_t busy_tsc;
    uint64_t idle_tsc;
    uint64_t core_busy_tsc = 0;
    uint64_t core_idle_tsc = 0;
    double core_busy_perc = 0;

    if (!isatty(STDOUT_FILENO)) {
        /* Don't print periodic stats if output is not going
         * to a terminal.
         */
        return;
    }
    io_this_second = 0;
    TAILQ_FOREACH(worker, &g_workers, link) {
        busy_tsc = 0;
        idle_tsc = 0;
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            io_this_second += ns_ctx->stats.io_completed - ns_ctx->stats.last_io_completed;
            ns_ctx->stats.last_io_completed = ns_ctx->stats.io_completed;

            if (g_monitor_perf_cores) {
                busy_tsc += ns_ctx->stats.busy_tsc - ns_ctx->stats.last_busy_tsc;
                idle_tsc += ns_ctx->stats.idle_tsc - ns_ctx->stats.last_idle_tsc;
                ns_ctx->stats.last_busy_tsc = ns_ctx->stats.busy_tsc;
                ns_ctx->stats.last_idle_tsc = ns_ctx->stats.idle_tsc;
            }
        }
        if (g_monitor_perf_cores) {
            core_busy_tsc += busy_tsc;
            core_idle_tsc += idle_tsc;
        }
    }
    mb_this_second = (double)io_this_second * g_io_size_bytes / (1024 * 1024);

    printf("%s%9ju IOPS, %8.2f MiB/s", warmup ? "[warmup] " : "", io_this_second, mb_this_second);
    if (g_monitor_perf_cores) {
        core_busy_perc = (double)core_busy_tsc / (core_idle_tsc + core_busy_tsc) * 100;
        printf("%3d Core(s): %6.2f%% Busy", g_num_workers, core_busy_perc);
    }
    printf("\r");
    fflush(stdout);
}

static void
perf_dump_transport_statistics(struct worker_thread *worker)
{
    struct ns_worker_ctx *ns_ctx;

    TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
        if (ns_ctx->entry->fn_table->dump_transport_stats) {
            ns_ctx->entry->fn_table->dump_transport_stats(worker->lcore, ns_ctx);
        }
    }
}

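/*
 * Per-worker event loop: create qpairs for every assigned namespace, sync
 * with the other workers on a barrier, optionally run a warmup interval
 * whose statistics are thrown away, then poll for completions until the
 * test time expires (or every context drains), and finally wait out the
 * remaining in-flight I/O before releasing the qpairs.
 */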
static int
work_fn(void *arg)
{
    uint64_t tsc_start, tsc_end, tsc_current, tsc_next_print;
    struct worker_thread *worker = (struct worker_thread *) arg;
    struct ns_worker_ctx *ns_ctx = NULL;
    uint32_t unfinished_ns_ctx;
    bool warmup = false;
    int rc;
    int64_t check_rc;
    uint64_t check_now;

    /* Allocate queue pairs for each namespace. */
    TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
        if (init_ns_worker_ctx(ns_ctx) != 0) {
            printf("ERROR: init_ns_worker_ctx() failed\n");
            /* Wait on barrier to avoid blocking of successful workers */
            pthread_barrier_wait(&g_worker_sync_barrier);
            return 1;
        }
    }

    rc = pthread_barrier_wait(&g_worker_sync_barrier);
    if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) {
        printf("ERROR: failed to wait on thread sync barrier\n");
        return 1;
    }

    tsc_start = spdk_get_ticks();
    tsc_current = tsc_start;
    tsc_next_print = tsc_current + g_tsc_rate;

    if (g_warmup_time_in_sec) {
        warmup = true;
        tsc_end = tsc_current + g_warmup_time_in_sec * g_tsc_rate;
    } else {
        tsc_end = tsc_current + g_time_in_sec * g_tsc_rate;
    }

    /* Submit initial I/O for each namespace. */
    TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
        submit_io(ns_ctx, g_queue_depth);
    }

    while (spdk_likely(!g_exit)) {
        bool all_draining = true;

        /*
         * Check for completed I/O for each controller. A new
         * I/O will be submitted in the io_complete callback
         * to replace each I/O that is completed.
         */
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            check_now = spdk_get_ticks();
            check_rc = ns_ctx->entry->fn_table->check_io(ns_ctx);

            if (check_rc > 0) {
                ns_ctx->stats.busy_tsc += check_now - ns_ctx->stats.last_tsc;
            } else {
                ns_ctx->stats.idle_tsc += check_now - ns_ctx->stats.last_tsc;
            }
            ns_ctx->stats.last_tsc = check_now;

            if (!ns_ctx->is_draining) {
                all_draining = false;
            }
        }

        if (spdk_unlikely(all_draining)) {
            break;
        }

        tsc_current = spdk_get_ticks();

        if (worker->lcore == g_main_core && tsc_current > tsc_next_print) {
            tsc_next_print += g_tsc_rate;
            print_periodic_performance(warmup);
        }

        if (tsc_current > tsc_end) {
            if (warmup) {
                /* Update test start and end time, clear statistics */
                tsc_start = spdk_get_ticks();
                tsc_end = tsc_start + g_time_in_sec * g_tsc_rate;

                TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
                    memset(&ns_ctx->stats, 0, sizeof(ns_ctx->stats));
                    ns_ctx->stats.min_tsc = UINT64_MAX;
                    spdk_histogram_data_reset(ns_ctx->histogram);
                }

                if (worker->lcore == g_main_core && isatty(STDOUT_FILENO)) {
                    /* warmup stage prints a longer string to stdout, need to erase it */
                    printf("%c[2K", 27);
                }

                warmup = false;
            } else {
                break;
            }
        }
    }

    /* Capture the actual elapsed time when we break out of the main loop. This will account
     * for cases where we exit prematurely due to a signal. We only need to capture it on
     * one core, so use the main core.
     */
    if (worker->lcore == g_main_core) {
        g_elapsed_time_in_usec = (tsc_current - tsc_start) * SPDK_SEC_TO_USEC / g_tsc_rate;
    }

    if (g_dump_transport_stats) {
        pthread_mutex_lock(&g_stats_mutex);
        perf_dump_transport_statistics(worker);
        pthread_mutex_unlock(&g_stats_mutex);
    }

    /* Drain the remaining I/O of each ns_ctx in round robin for fairness. */
    do {
        unfinished_ns_ctx = 0;
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            /* The first pass through this loop marks every context as draining. */
            if (!ns_ctx->is_draining) {
                ns_ctx->is_draining = true;
            }

            if (ns_ctx->current_queue_depth > 0) {
                ns_ctx->entry->fn_table->check_io(ns_ctx);
                if (ns_ctx->current_queue_depth > 0) {
                    unfinished_ns_ctx++;
                }
            }
        }
    } while (unfinished_ns_ctx > 0);

    TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
        cleanup_ns_worker_ctx(ns_ctx);
    }

    return 0;
}

static void
usage(char *program_name)
{
    printf("%s options", program_name);
#if defined(SPDK_CONFIG_URING) || defined(HAVE_LIBAIO)
    printf(" [Kernel device(s)]...");
#endif
    printf("\n");
    printf("\t[-b, --allowed-pci-addr <addr> allowed local PCIe device address]\n");
    printf("\t Example: -b 0000:d8:00.0 -b 0000:d9:00.0\n");
    printf("\t[-q, --io-depth <val> io depth]\n");
    printf("\t[-o, --io-size <val> io size in bytes]\n");
    printf("\t[-O, --io-unit-size io unit size in bytes (4-byte aligned) for SPDK driver. default: same as io size]\n");
    printf("\t[-P, --num-qpairs <val> number of io queues per namespace. default: 1]\n");
    printf("\t[-U, --num-unused-qpairs <val> number of unused io queues per controller. default: 0]\n");
    printf("\t[-w, --io-pattern <pattern> io pattern type, must be one of\n");
    printf("\t\t(read, write, randread, randwrite, rw, randrw)]\n");
    printf("\t[-M, --rwmixread <0-100> rwmixread (100 for reads, 0 for writes)]\n");
    printf("\t[-F, --zipf <theta> use zipf distribution for random I/O]\n");
    printf("\t[-L, --enable-sw-latency-tracking enable latency tracking via sw, default: disabled]\n");
    printf("\t\t-L for latency summary, -LL for detailed histogram\n");
    printf("\t[-l, --enable-ssd-latency-tracking enable latency tracking via ssd (if supported), default: disabled]\n");
    printf("\t[-t, --time <sec> time in seconds]\n");
    printf("\t[-a, --warmup-time <sec> warmup time in seconds]\n");
    printf("\t[-c, --core-mask <mask> core mask for I/O submission/completion.]\n");
    printf("\t\t(default: 1)\n");
    printf("\t[-d, --number-ios <val> number of I/O to perform per thread on each namespace. Note: this is an additional exit criterion.]\n");
    printf("\t\t(default: 0 - unlimited)\n");
    printf("\t[-D, --disable-sq-cmb disable submission queue in controller memory buffer, default: enabled]\n");
    printf("\t[-H, --enable-tcp-hdgst enable header digest for TCP transport, default: disabled]\n");
    printf("\t[-I, --enable-tcp-ddgst enable data digest for TCP transport, default: disabled]\n");
    printf("\t[-N, --no-shst-notification no shutdown notification process for controllers, default: disabled]\n");
    printf("\t[-r, --transport <fmt> Transport ID for local PCIe NVMe or NVMeoF]\n");
    printf("\t Format: 'key:value [key:value] ...'\n");
    printf("\t Keys:\n");
    printf("\t  trtype      Transport type (e.g. PCIe, RDMA)\n");
    printf("\t  adrfam      Address family (e.g. IPv4, IPv6)\n");
    printf("\t  traddr      Transport address (e.g. 0000:04:00.0 for PCIe or 192.168.100.8 for RDMA)\n");
    printf("\t  trsvcid     Transport service identifier (e.g. 4420)\n");
    printf("\t  subnqn      Subsystem NQN (default: %s)\n", SPDK_NVMF_DISCOVERY_NQN);
    printf("\t  ns          NVMe namespace ID (all active namespaces are used by default)\n");
    printf("\t  hostnqn     Host NQN\n");
    printf("\t Example: -r 'trtype:PCIe traddr:0000:04:00.0' for PCIe or\n");
    printf("\t          -r 'trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420' for NVMeoF\n");
    printf("\t Note: can be specified multiple times to test multiple disks/targets.\n");
    printf("\t[-e, --metadata <fmt> metadata configuration]\n");
    printf("\t Keys:\n");
    printf("\t  PRACT      Protection Information Action bit (PRACT=1 or PRACT=0)\n");
    printf("\t  PRCHK      Control of Protection Information Checking (PRCHK=GUARD|REFTAG|APPTAG)\n");
    printf("\t Example: -e 'PRACT=0,PRCHK=GUARD|REFTAG|APPTAG'\n");
    printf("\t          -e 'PRACT=1,PRCHK=GUARD'\n");
    printf("\t[-k, --keepalive <ms> keep alive timeout period in millisecond]\n");
    printf("\t[-s, --hugemem-size <MB> DPDK huge memory size in MB.]\n");
    printf("\t[-g, --mem-single-seg use single file descriptor for DPDK memory segments]\n");
    printf("\t[-C, --max-completion-per-poll <val> max completions per poll]\n");
    printf("\t\t(default: 0 - unlimited)\n");
    printf("\t[-i, --shmem-grp-id <id> shared memory group ID]\n");
    printf("\t[-Q, --skip-errors log I/O errors every N times (default: 1)]\n");
    printf("\t");
    spdk_log_usage(stdout, "-T");
    printf("\t[-V, --enable-vmd enable VMD enumeration]\n");
    printf("\t[-z, --disable-zcopy <impl> disable zero copy send for the given sock implementation. Default for posix impl]\n");
    printf("\t[-Z, --enable-zcopy <impl> enable zero copy send for the given sock implementation]\n");
    printf("\t[-A, --buffer-alignment IO buffer alignment. Must be power of 2 and not less than cache line (%u)]\n",
           SPDK_CACHE_LINE_SIZE);
    printf("\t[-S, --default-sock-impl <impl> set the default sock impl, e.g. \"posix\"]\n");
    printf("\t[-m, --cpu-usage display real-time overall cpu usage on used cores]\n");
#ifdef SPDK_CONFIG_URING
    printf("\t[-R, --enable-uring enable using liburing to drive kernel devices (Default: libaio)]\n");
#endif
#ifdef DEBUG
    printf("\t[-G, --enable-debug enable debug logging]\n");
#else
    printf("\t[-G, --enable-debug enable debug logging (flag disabled, must reconfigure with --enable-debug)]\n");
#endif
    printf("\t[--transport-stats dump transport statistics]\n");
    printf("\t[--iova-mode <mode> specify DPDK IOVA mode: va|pa]\n");
    printf("\t[--io-queue-size <val> size of NVMe IO queue. Default: maximum allowed by controller]\n");
    printf("\t[--disable-ktls disable Kernel TLS. Only valid for ssl impl. Default for ssl impl]\n");
    printf("\t[--enable-ktls enable Kernel TLS. Only valid for ssl impl]\n");
    printf("\t[--tls-version <val> TLS version to use. Only valid for ssl impl. Default: 0 (auto-negotiation)]\n");
    printf("\t[--psk-key <val> Default PSK KEY in hexadecimal digits, e.g. 1234567890ABCDEF (only applies when sock_impl == ssl)]\n");
    printf("\t[--psk-identity <val> Default PSK ID, e.g. psk.spdk.io (only applies when sock_impl == ssl)]\n");
    printf("\t[--zerocopy-threshold <val> data is sent with MSG_ZEROCOPY if size is greater than this val. Default: 0 to disable it]\n");
    printf("\t[--zerocopy-threshold-sock-impl <impl> specify the sock implementation to set zerocopy_threshold]\n");
    printf("\t[--transport-tos <val> specify the type of service for RDMA transport. Default: 0 (disabled)]\n");
    printf("\t[--rdma-srq-size <val> The size of a shared rdma receive queue. Default: 0 (disabled)]\n");
    printf("\t[--use-every-core for each namespace, I/Os are submitted from all cores]\n");
}

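/*
 * Callback for spdk_histogram_data_iterate(): buckets arrive in ascending
 * latency order, and each time the cumulative completion fraction
 * (so_far / total) crosses the next entry in g_latency_cutoffs, that
 * percentile is printed using the bucket's upper bound converted from TSC
 * ticks to microseconds.
 */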
static void
check_cutoff(void *ctx, uint64_t start, uint64_t end, uint64_t count,
             uint64_t total, uint64_t so_far)
{
    double so_far_pct;
    double **cutoff = ctx;

    if (count == 0) {
        return;
    }

    so_far_pct = (double)so_far / total;
    while (so_far_pct >= **cutoff && **cutoff > 0) {
        printf("%9.5f%% : %9.3fus\n", **cutoff * 100, (double)end * 1000 * 1000 / g_tsc_rate);
        (*cutoff)++;
    }
}

static void
print_bucket(void *ctx, uint64_t start, uint64_t end, uint64_t count,
             uint64_t total, uint64_t so_far)
{
    double so_far_pct;

    if (count == 0) {
        return;
    }

    so_far_pct = (double)so_far * 100 / total;
    printf("%9.3f - %9.3f: %9.4f%% (%9ju)\n",
           (double)start * 1000 * 1000 / g_tsc_rate,
           (double)end * 1000 * 1000 / g_tsc_rate,
           so_far_pct, count);
}

static void
print_performance(void)
{
    uint64_t total_io_completed, total_io_tsc;
    double io_per_second, mb_per_second, average_latency, min_latency, max_latency;
    double sum_ave_latency, min_latency_so_far, max_latency_so_far;
    double total_io_per_second, total_mb_per_second;
    int ns_count;
    struct worker_thread *worker;
    struct ns_worker_ctx *ns_ctx;
    uint32_t max_strlen;

    total_io_per_second = 0;
    total_mb_per_second = 0;
    total_io_completed = 0;
    total_io_tsc = 0;
    min_latency_so_far = (double)UINT64_MAX;
    max_latency_so_far = 0;
    ns_count = 0;

    max_strlen = 0;
    TAILQ_FOREACH(worker, &g_workers, link) {
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            max_strlen = spdk_max(strlen(ns_ctx->entry->name), max_strlen);
        }
    }

    printf("========================================================\n");
    printf("%*s\n", max_strlen + 60, "Latency(us)");
    printf("%-*s: %10s %10s %10s %10s %10s\n",
           max_strlen + 13, "Device Information", "IOPS", "MiB/s", "Average", "min", "max");

    TAILQ_FOREACH(worker, &g_workers, link) {
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            if (ns_ctx->stats.io_completed != 0) {
                io_per_second = (double)ns_ctx->stats.io_completed * 1000 * 1000 / g_elapsed_time_in_usec;
                mb_per_second = io_per_second * g_io_size_bytes / (1024 * 1024);
                average_latency = ((double)ns_ctx->stats.total_tsc / ns_ctx->stats.io_completed) * 1000 * 1000 /
                                  g_tsc_rate;
                min_latency = (double)ns_ctx->stats.min_tsc * 1000 * 1000 / g_tsc_rate;
                if (min_latency < min_latency_so_far) {
                    min_latency_so_far = min_latency;
                }

                max_latency = (double)ns_ctx->stats.max_tsc * 1000 * 1000 / g_tsc_rate;
                if (max_latency > max_latency_so_far) {
                    max_latency_so_far = max_latency;
                }

                printf("%-*.*s from core %2u: %10.2f %10.2f %10.2f %10.2f %10.2f\n",
                       max_strlen, max_strlen, ns_ctx->entry->name, worker->lcore,
                       io_per_second, mb_per_second,
                       average_latency, min_latency, max_latency);
                total_io_per_second += io_per_second;
                total_mb_per_second += mb_per_second;
                total_io_completed += ns_ctx->stats.io_completed;
                total_io_tsc += ns_ctx->stats.total_tsc;
                ns_count++;
            }
        }
    }

    if (ns_count != 0 && total_io_completed) {
        sum_ave_latency = ((double)total_io_tsc / total_io_completed) * 1000 * 1000 / g_tsc_rate;
        printf("========================================================\n");
        printf("%-*s: %10.2f %10.2f %10.2f %10.2f %10.2f\n",
               max_strlen + 13, "Total", total_io_per_second, total_mb_per_second,
               sum_ave_latency, min_latency_so_far, max_latency_so_far);
        printf("\n");
    }

    if (g_latency_sw_tracking_level == 0 || total_io_completed == 0) {
        return;
    }

    TAILQ_FOREACH(worker, &g_workers, link) {
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            const double *cutoff = g_latency_cutoffs;

            printf("Summary latency data for %-43.43s from core %u:\n", ns_ctx->entry->name, worker->lcore);
            printf("=================================================================================\n");

            spdk_histogram_data_iterate(ns_ctx->histogram, check_cutoff, &cutoff);

            printf("\n");
        }
    }

    if (g_latency_sw_tracking_level == 1) {
        return;
    }

    TAILQ_FOREACH(worker, &g_workers, link) {
        TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
            printf("Latency histogram for %-43.43s from core %u:\n", ns_ctx->entry->name, worker->lcore);
            printf("==============================================================================\n");
            printf("       Range in us     Cumulative    IO count\n");

            spdk_histogram_data_iterate(ns_ctx->histogram, print_bucket, NULL);
            printf("\n");
        }
    }
}

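/*
 * Dump the Intel vendor-specific read/write latency log page fetched by
 * print_latency_statistics(): 32 us buckets up to ~1 ms, then 1 ms buckets
 * up to 32 ms, then 32 ms buckets beyond that; empty buckets are skipped.
 */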
static void
print_latency_page(struct ctrlr_entry *entry)
{
    int i;

    printf("\n");
    printf("%s\n", entry->name);
    printf("--------------------------------------------------------\n");

    for (i = 0; i < 32; i++) {
        if (entry->latency_page->buckets_32us[i]) {
            printf("Bucket %dus - %dus: %d\n", i * 32, (i + 1) * 32, entry->latency_page->buckets_32us[i]);
        }
    }
    for (i = 0; i < 31; i++) {
        if (entry->latency_page->buckets_1ms[i]) {
            printf("Bucket %dms - %dms: %d\n", i + 1, i + 2, entry->latency_page->buckets_1ms[i]);
        }
    }
    for (i = 0; i < 31; i++) {
        if (entry->latency_page->buckets_32ms[i]) {
            printf("Bucket %dms - %dms: %d\n", (i + 1) * 32, (i + 2) * 32,
                   entry->latency_page->buckets_32ms[i]);
        }
    }
}

static void
print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_page)
{
    struct ctrlr_entry *ctrlr;

    printf("%s Latency Statistics:\n", op_name);
    printf("========================================================\n");
    TAILQ_FOREACH(ctrlr, &g_controllers, link) {
        if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
            if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr->ctrlr, log_page, SPDK_NVME_GLOBAL_NS_TAG,
                                                 ctrlr->latency_page, sizeof(struct spdk_nvme_intel_rw_latency_page), 0,
                                                 enable_latency_tracking_complete,
                                                 NULL)) {
                printf("nvme_ctrlr_cmd_get_log_page() failed\n");
                exit(1);
            }

            g_outstanding_commands++;
        } else {
            printf("Controller %s: %s latency statistics not supported\n", ctrlr->name, op_name);
        }
    }

    while (g_outstanding_commands) {
        TAILQ_FOREACH(ctrlr, &g_controllers, link) {
            spdk_nvme_ctrlr_process_admin_completions(ctrlr->ctrlr);
        }
    }

    TAILQ_FOREACH(ctrlr, &g_controllers, link) {
        if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
            print_latency_page(ctrlr);
        }
    }
    printf("\n");
}

static void
print_stats(void)
{
    print_performance();
    if (g_latency_ssd_tracking_enable) {
        if (g_rw_percentage != 0) {
            print_latency_statistics("Read", SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
        }
        if (g_rw_percentage != 100) {
            print_latency_statistics("Write", SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY);
        }
    }
}

static void
unregister_trids(void)
{
    struct trid_entry *trid_entry, *tmp;

    TAILQ_FOREACH_SAFE(trid_entry, &g_trid_list, tailq, tmp) {
        TAILQ_REMOVE(&g_trid_list, trid_entry, tailq);
        free(trid_entry);
    }
}

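/*
 * Parse one -r argument into a trid_entry. The standard transport ID keys
 * go through spdk_nvme_transport_id_parse(); the perf-specific "ns:" and
 * "hostnqn:" keys are extracted by hand afterwards, e.g.:
 *   -r 'trtype:RDMA adrfam:IPv4 traddr:192.168.100.8 trsvcid:4420 ns:1'
 */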
static int
add_trid(const char *trid_str)
{
    struct trid_entry *trid_entry;
    struct spdk_nvme_transport_id *trid;
    char *ns;
    char *hostnqn;

    trid_entry = calloc(1, sizeof(*trid_entry));
    if (trid_entry == NULL) {
        return -1;
    }

    trid = &trid_entry->trid;
    trid->trtype = SPDK_NVME_TRANSPORT_PCIE;
    snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);

    if (spdk_nvme_transport_id_parse(trid, trid_str) != 0) {
        fprintf(stderr, "Invalid transport ID format '%s'\n", trid_str);
        free(trid_entry);
        return 1;
    }

    ns = strcasestr(trid_str, "ns:");
    if (ns) {
        char nsid_str[6]; /* 5 digits maximum in an nsid */
        int len;
        int nsid;

        ns += 3;

        len = strcspn(ns, " \t\n");
        if (len > 5) {
            fprintf(stderr, "NVMe namespace IDs must be 5 digits or less\n");
            free(trid_entry);
            return 1;
        }

        memcpy(nsid_str, ns, len);
        nsid_str[len] = '\0';

        nsid = spdk_strtol(nsid_str, 10);
        if (nsid <= 0 || nsid > 65535) {
            fprintf(stderr, "NVMe namespace IDs must be less than 65536 and greater than 0\n");
            free(trid_entry);
            return 1;
        }

        trid_entry->nsid = (uint16_t)nsid;
    }

    hostnqn = strcasestr(trid_str, "hostnqn:");
    if (hostnqn) {
        size_t len;

        hostnqn += strlen("hostnqn:");

        len = strcspn(hostnqn, " \t\n");
        if (len > (sizeof(trid_entry->hostnqn) - 1)) {
            fprintf(stderr, "Host NQN is too long\n");
            free(trid_entry);
            return 1;
        }

        memcpy(trid_entry->hostnqn, hostnqn, len);
        trid_entry->hostnqn[len] = '\0';
    }

    TAILQ_INSERT_TAIL(&g_trid_list, trid_entry, tailq);
    return 0;
}

static int
add_allowed_pci_device(const char *bdf_str, struct spdk_env_opts *env_opts)
{
    int rc;

    if (env_opts->num_pci_addr >= MAX_ALLOWED_PCI_DEVICE_NUM) {
        fprintf(stderr, "Currently we only support up to %d allowed PCI devices\n",
                MAX_ALLOWED_PCI_DEVICE_NUM);
        return -1;
    }

    rc = spdk_pci_addr_parse(&env_opts->pci_allowed[env_opts->num_pci_addr], bdf_str);
    if (rc < 0) {
        fprintf(stderr, "Failed to parse the given bdf_str=%s\n", bdf_str);
        return -1;
    }

    env_opts->num_pci_addr++;
    return 0;
}

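/*
 * Pull one "KEY=value" pair out of a comma/space separated string, copy the
 * pieces into the caller's buffers, and advance *str past the pair. Given
 * "PRACT=1,PRCHK=GUARD|REFTAG", the first call yields key "PRACT" and value
 * "1", the second key "PRCHK" and value "GUARD|REFTAG".
 */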
static size_t
parse_next_key(const char **str, char *key, char *val, size_t key_buf_size,
               size_t val_buf_size)
{
    const char *sep;
    const char *separator = ", \t\n";
    size_t key_len, val_len;

    *str += strspn(*str, separator);

    sep = strchr(*str, '=');
    if (!sep) {
        fprintf(stderr, "Key without '=' separator\n");
        return 0;
    }

    key_len = sep - *str;
    if (key_len >= key_buf_size) {
        fprintf(stderr, "Key length %zu is greater than maximum allowed %zu\n",
                key_len, key_buf_size - 1);
        return 0;
    }

    memcpy(key, *str, key_len);
    key[key_len] = '\0';

    *str += key_len + 1; /* Skip key */
    val_len = strcspn(*str, separator);
    if (val_len == 0) {
        fprintf(stderr, "Key without value\n");
        return 0;
    }

    if (val_len >= val_buf_size) {
        fprintf(stderr, "Value length %zu is greater than maximum allowed %zu\n",
                val_len, val_buf_size - 1);
        return 0;
    }

    memcpy(val, *str, val_len);
    val[val_len] = '\0';

    *str += val_len;

    return val_len;
}

static int
parse_metadata(const char *metacfg_str)
{
    const char *str;
    size_t val_len;
    char key[32];
    char val[1024];

    if (metacfg_str == NULL) {
        return -EINVAL;
    }

    str = metacfg_str;

    while (*str != '\0') {
        val_len = parse_next_key(&str, key, val, sizeof(key), sizeof(val));
        if (val_len == 0) {
            fprintf(stderr, "Failed to parse metadata\n");
            return -EINVAL;
        }

        if (strcmp(key, "PRACT") == 0) {
            if (*val == '1') {
                g_metacfg_pract_flag = SPDK_NVME_IO_FLAGS_PRACT;
            }
        } else if (strcmp(key, "PRCHK") == 0) {
            if (strstr(val, "GUARD") != NULL) {
                g_metacfg_prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD;
            }
            if (strstr(val, "REFTAG") != NULL) {
                g_metacfg_prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
            }
            if (strstr(val, "APPTAG") != NULL) {
                g_metacfg_prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;
            }
        } else {
            fprintf(stderr, "Unknown key '%s'\n", key);
        }
    }

    return 0;
}

#define PERF_GETOPT_SHORT "a:b:c:d:e:gi:lmo:q:r:k:s:t:w:z:A:C:DF:GHILM:NO:P:Q:RS:T:U:VZ:"

static const struct option g_perf_cmdline_opts[] = {
#define PERF_WARMUP_TIME 'a'
    {"warmup-time", required_argument, NULL, PERF_WARMUP_TIME},
#define PERF_ALLOWED_PCI_ADDR 'b'
    {"allowed-pci-addr", required_argument, NULL, PERF_ALLOWED_PCI_ADDR},
#define PERF_CORE_MASK 'c'
    {"core-mask", required_argument, NULL, PERF_CORE_MASK},
#define PERF_METADATA 'e'
    {"metadata", required_argument, NULL, PERF_METADATA},
#define PERF_MEM_SINGL_SEG 'g'
    {"mem-single-seg", no_argument, NULL, PERF_MEM_SINGL_SEG},
#define PERF_SHMEM_GROUP_ID 'i'
    {"shmem-grp-id", required_argument, NULL, PERF_SHMEM_GROUP_ID},
#define PERF_ENABLE_SSD_LATENCY_TRACING 'l'
    {"enable-ssd-latency-tracking", no_argument, NULL, PERF_ENABLE_SSD_LATENCY_TRACING},
#define PERF_CPU_USAGE 'm'
    {"cpu-usage", no_argument, NULL, PERF_CPU_USAGE},
#define PERF_IO_SIZE 'o'
    {"io-size", required_argument, NULL, PERF_IO_SIZE},
#define PERF_IO_DEPTH 'q'
    {"io-depth", required_argument, NULL, PERF_IO_DEPTH},
#define PERF_TRANSPORT 'r'
    {"transport", required_argument, NULL, PERF_TRANSPORT},
#define PERF_KEEPALIVE 'k'
    {"keepalive", required_argument, NULL, PERF_KEEPALIVE},
#define PERF_HUGEMEM_SIZE 's'
    {"hugemem-size", required_argument, NULL, PERF_HUGEMEM_SIZE},
#define PERF_TIME 't'
    {"time", required_argument, NULL, PERF_TIME},
#define PERF_NUMBER_IOS 'd'
    {"number-ios", required_argument, NULL, PERF_NUMBER_IOS},
#define PERF_IO_PATTERN 'w'
    {"io-pattern", required_argument, NULL, PERF_IO_PATTERN},
#define PERF_DISABLE_ZCOPY 'z'
    {"disable-zcopy", required_argument, NULL, PERF_DISABLE_ZCOPY},
#define PERF_BUFFER_ALIGNMENT 'A'
    {"buffer-alignment", required_argument, NULL, PERF_BUFFER_ALIGNMENT},
#define PERF_MAX_COMPLETIONS_PER_POLL 'C'
    {"max-completion-per-poll", required_argument, NULL, PERF_MAX_COMPLETIONS_PER_POLL},
#define PERF_DISABLE_SQ_CMB 'D'
    {"disable-sq-cmb", no_argument, NULL, PERF_DISABLE_SQ_CMB},
#define PERF_ZIPF 'F'
    {"zipf", required_argument, NULL, PERF_ZIPF},
#define PERF_ENABLE_DEBUG 'G'
    {"enable-debug", no_argument, NULL, PERF_ENABLE_DEBUG},
#define PERF_ENABLE_TCP_HDGST 'H'
    {"enable-tcp-hdgst", no_argument, NULL, PERF_ENABLE_TCP_HDGST},
#define PERF_ENABLE_TCP_DDGST 'I'
    {"enable-tcp-ddgst", no_argument, NULL, PERF_ENABLE_TCP_DDGST},
#define PERF_ENABLE_SW_LATENCY_TRACING 'L'
    {"enable-sw-latency-tracking", no_argument, NULL, PERF_ENABLE_SW_LATENCY_TRACING},
#define PERF_RW_MIXREAD 'M'
    {"rwmixread", required_argument, NULL, PERF_RW_MIXREAD},
#define PERF_NO_SHST_NOTIFICATION 'N'
    {"no-shst-notification", no_argument, NULL, PERF_NO_SHST_NOTIFICATION},
#define PERF_IO_UNIT_SIZE 'O'
    {"io-unit-size", required_argument, NULL, PERF_IO_UNIT_SIZE},
#define PERF_IO_QUEUES_PER_NS 'P'
    {"num-qpairs", required_argument, NULL, PERF_IO_QUEUES_PER_NS},
#define PERF_SKIP_ERRORS 'Q'
    {"skip-errors", required_argument, NULL, PERF_SKIP_ERRORS},
#define PERF_ENABLE_URING 'R'
    {"enable-uring", no_argument, NULL, PERF_ENABLE_URING},
#define PERF_DEFAULT_SOCK_IMPL 'S'
    {"default-sock-impl", required_argument, NULL, PERF_DEFAULT_SOCK_IMPL},
#define PERF_LOG_FLAG 'T'
    {"logflag", required_argument, NULL, PERF_LOG_FLAG},
#define PERF_NUM_UNUSED_IO_QPAIRS 'U'
    {"num-unused-qpairs", required_argument, NULL, PERF_NUM_UNUSED_IO_QPAIRS},
#define PERF_ENABLE_VMD 'V'
    {"enable-vmd", no_argument, NULL, PERF_ENABLE_VMD},
#define PERF_ENABLE_ZCOPY 'Z'
    {"enable-zcopy", required_argument, NULL, PERF_ENABLE_ZCOPY},
#define PERF_TRANSPORT_STATISTICS 257
    {"transport-stats", no_argument, NULL, PERF_TRANSPORT_STATISTICS},
#define PERF_IOVA_MODE 258
    {"iova-mode", required_argument, NULL, PERF_IOVA_MODE},
#define PERF_IO_QUEUE_SIZE 259
    {"io-queue-size", required_argument, NULL, PERF_IO_QUEUE_SIZE},
#define PERF_DISABLE_KTLS 260
    {"disable-ktls", no_argument, NULL, PERF_DISABLE_KTLS},
#define PERF_ENABLE_KTLS 261
    {"enable-ktls", no_argument, NULL, PERF_ENABLE_KTLS},
#define PERF_TLS_VERSION 262
    {"tls-version", required_argument, NULL, PERF_TLS_VERSION},
#define PERF_PSK_KEY 263
    {"psk-key", required_argument, NULL, PERF_PSK_KEY},
#define PERF_PSK_IDENTITY 264
    {"psk-identity", required_argument, NULL, PERF_PSK_IDENTITY},
#define PERF_ZEROCOPY_THRESHOLD 265
    {"zerocopy-threshold", required_argument, NULL, PERF_ZEROCOPY_THRESHOLD},
#define PERF_SOCK_IMPL 266
    {"zerocopy-threshold-sock-impl", required_argument, NULL, PERF_SOCK_IMPL},
#define PERF_TRANSPORT_TOS 267
    {"transport-tos", required_argument, NULL, PERF_TRANSPORT_TOS},
#define PERF_RDMA_SRQ_SIZE 268
    {"rdma-srq-size", required_argument, NULL, PERF_RDMA_SRQ_SIZE},
#define PERF_USE_EVERY_CORE 269
    {"use-every-core", no_argument, NULL, PERF_USE_EVERY_CORE},
    /* Should be the last element */
    {0, 0, 0, 0}
};

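/*
 * Note the two-level switch for numeric short options below: the outer case
 * list funnels every integer-valued option through a single spdk_strtol()
 * call, and the inner switch then stores the parsed value in the matching
 * global or env_opts field. Non-numeric options are handled individually.
 */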
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
static int
|
2021-04-14 12:46:14 +00:00
|
|
|
parse_args(int argc, char **argv, struct spdk_env_opts *env_opts)
|
2015-09-21 15:52:41 +00:00
|
|
|
{
|
2020-12-04 12:33:39 +00:00
|
|
|
int op, long_idx;
|
2019-01-23 00:31:40 +00:00
|
|
|
long int val;
|
2022-09-05 18:22:13 +00:00
|
|
|
long long int val2;
|
2019-10-01 02:54:25 +00:00
|
|
|
int rc;
|
2021-05-07 00:01:23 +00:00
|
|
|
char *endptr;
|
2022-07-22 14:27:45 +00:00
|
|
|
bool ssl_used = false;
|
|
|
|
char *sock_impl = "posix";
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2020-12-04 12:33:39 +00:00
|
|
|
while ((op = getopt_long(argc, argv, PERF_GETOPT_SHORT, g_perf_cmdline_opts, &long_idx)) != -1) {
|
2015-09-21 15:52:41 +00:00
|
|
|
switch (op) {
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_WARMUP_TIME:
|
|
|
|
case PERF_BUFFER_ALIGNMENT:
|
|
|
|
case PERF_SHMEM_GROUP_ID:
|
|
|
|
case PERF_MAX_COMPLETIONS_PER_POLL:
|
|
|
|
case PERF_IO_QUEUES_PER_NS:
|
|
|
|
case PERF_IO_SIZE:
|
|
|
|
case PERF_IO_UNIT_SIZE:
|
|
|
|
case PERF_IO_DEPTH:
|
|
|
|
case PERF_KEEPALIVE:
|
|
|
|
case PERF_HUGEMEM_SIZE:
|
|
|
|
case PERF_TIME:
|
|
|
|
case PERF_RW_MIXREAD:
|
|
|
|
case PERF_NUM_UNUSED_IO_QPAIRS:
|
2021-11-25 01:40:57 +00:00
|
|
|
case PERF_SKIP_ERRORS:
|
2021-08-05 12:56:07 +00:00
|
|
|
case PERF_IO_QUEUE_SIZE:
|
2022-05-25 01:44:37 +00:00
|
|
|
case PERF_ZEROCOPY_THRESHOLD:
|
2023-01-06 11:09:55 +00:00
|
|
|
case PERF_RDMA_SRQ_SIZE:
|
2019-01-23 00:31:40 +00:00
|
|
|
val = spdk_strtol(optarg, 10);
|
|
|
|
if (val < 0) {
|
|
|
|
fprintf(stderr, "Converting a string to integer failed\n");
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
switch (op) {
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_WARMUP_TIME:
|
2020-08-14 14:13:14 +00:00
|
|
|
g_warmup_time_in_sec = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_SHMEM_GROUP_ID:
|
2021-04-14 12:46:14 +00:00
|
|
|
env_opts->shm_id = val;
|
2019-01-23 00:31:40 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_MAX_COMPLETIONS_PER_POLL:
|
2019-01-23 00:31:40 +00:00
|
|
|
g_max_completions = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_IO_QUEUES_PER_NS:
|
2017-12-14 22:13:43 +00:00
|
|
|
g_nr_io_queues_per_ns = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_IO_SIZE:
|
2019-01-23 00:31:40 +00:00
|
|
|
g_io_size_bytes = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_IO_UNIT_SIZE:
|
2020-07-29 08:57:10 +00:00
|
|
|
g_io_unit_size = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_IO_DEPTH:
|
2019-01-23 00:31:40 +00:00
|
|
|
g_queue_depth = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_KEEPALIVE:
|
2019-01-25 11:21:53 +00:00
|
|
|
g_keep_alive_timeout_in_ms = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_HUGEMEM_SIZE:
|
2021-04-14 12:46:14 +00:00
|
|
|
env_opts->mem_size = val;
|
2019-01-23 00:31:40 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_TIME:
|
2019-01-23 00:31:40 +00:00
|
|
|
g_time_in_sec = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_RW_MIXREAD:
|
2019-01-23 00:31:40 +00:00
|
|
|
g_rw_percentage = val;
|
2020-02-11 21:59:30 +00:00
|
|
|
g_mix_specified = true;
|
2019-01-23 00:31:40 +00:00
|
|
|
break;
|
2021-11-25 01:40:57 +00:00
|
|
|
case PERF_SKIP_ERRORS:
|
2021-01-25 16:10:05 +00:00
|
|
|
g_quiet_count = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_NUM_UNUSED_IO_QPAIRS:
|
2017-12-26 18:40:00 +00:00
|
|
|
g_nr_unused_io_queues = val;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_BUFFER_ALIGNMENT:
|
2020-09-18 16:42:13 +00:00
|
|
|
g_io_align = val;
|
|
|
|
if (!spdk_u32_is_pow2(g_io_align) || g_io_align < SPDK_CACHE_LINE_SIZE) {
|
|
|
|
fprintf(stderr, "Wrong alignment %u. Must be power of 2 and not less than cache lize (%u)\n",
|
|
|
|
g_io_align, SPDK_CACHE_LINE_SIZE);
|
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
g_io_align_specified = true;
|
|
|
|
break;
|
2021-08-05 12:56:07 +00:00
|
|
|
case PERF_IO_QUEUE_SIZE:
|
|
|
|
g_io_queue_size = val;
|
|
|
|
break;
|
2022-05-25 01:44:37 +00:00
|
|
|
case PERF_ZEROCOPY_THRESHOLD:
|
|
|
|
g_sock_zcopy_threshold = val;
|
2023-01-06 11:09:55 +00:00
|
|
|
break;
|
|
|
|
case PERF_RDMA_SRQ_SIZE:
|
|
|
|
g_rdma_srq_size = val;
|
|
|
|
break;
|
2019-01-23 00:31:40 +00:00
|
|
|
}
|
|
|
|
break;
|
2022-09-05 18:22:13 +00:00
|
|
|
case PERF_NUMBER_IOS:
|
|
|
|
val2 = spdk_strtoll(optarg, 10);
|
|
|
|
if (val2 < 0) {
|
|
|
|
fprintf(stderr, "Converting a string to integer failed\n");
|
|
|
|
return val2;
|
|
|
|
}
|
|
|
|
|
|
|
|
g_number_ios = (uint64_t)val2;
|
|
|
|
break;
|
2021-05-07 00:01:23 +00:00
|
|
|
case PERF_ZIPF:
|
|
|
|
errno = 0;
|
|
|
|
g_zipf_theta = strtod(optarg, &endptr);
|
|
|
|
if (errno || optarg == endptr || g_zipf_theta < 0) {
|
|
|
|
fprintf(stderr, "Illegal zipf theta value %s\n", optarg);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ALLOWED_PCI_ADDR:
|
2021-04-14 12:46:14 +00:00
|
|
|
if (add_allowed_pci_device(optarg, env_opts)) {
|
2020-07-08 12:20:59 +00:00
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_CORE_MASK:
|
2021-04-14 12:46:14 +00:00
|
|
|
env_opts->core_mask = optarg;
|
2015-09-25 21:09:41 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_METADATA:
|
2017-06-22 01:57:56 +00:00
|
|
|
if (parse_metadata(optarg)) {
|
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_MEM_SINGL_SEG:
|
2021-04-14 12:46:14 +00:00
|
|
|
env_opts->hugepage_single_segments = true;
|
2020-10-13 06:44:18 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_SSD_LATENCY_TRACING:
|
2017-05-15 22:57:26 +00:00
|
|
|
g_latency_ssd_tracking_enable = true;
|
2016-01-18 02:04:48 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_CPU_USAGE:
|
2021-02-17 14:50:17 +00:00
|
|
|
g_monitor_perf_cores = true;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_TRANSPORT:
|
2017-01-19 21:45:55 +00:00
|
|
|
if (add_trid(optarg)) {
|
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
}
|
2016-12-01 03:11:37 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_IO_PATTERN:
|
2020-02-11 21:59:30 +00:00
|
|
|
g_workload_type = optarg;
|
2015-09-21 15:52:41 +00:00
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_DISABLE_SQ_CMB:
|
2017-06-22 01:57:56 +00:00
|
|
|
g_disable_sq_cmb = 1;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_DEBUG:
|
2019-03-27 09:00:16 +00:00
|
|
|
#ifndef DEBUG
|
|
|
|
fprintf(stderr, "%s must be configured with --enable-debug for -G flag\n",
|
|
|
|
argv[0]);
|
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
#else
|
|
|
|
spdk_log_set_flag("nvme");
|
|
|
|
spdk_log_set_print_level(SPDK_LOG_DEBUG);
|
|
|
|
break;
|
|
|
|
#endif
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_TCP_HDGST:
|
2018-11-30 01:46:55 +00:00
|
|
|
g_header_digest = 1;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_TCP_DDGST:
|
2018-11-30 01:46:55 +00:00
|
|
|
g_data_digest = 1;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_SW_LATENCY_TRACING:
|
2017-05-15 22:57:26 +00:00
|
|
|
g_latency_sw_tracking_level++;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_NO_SHST_NOTIFICATION:
|
2019-08-02 02:10:05 +00:00
|
|
|
g_no_shn_notification = true;
|
|
|
|
break;
|
2020-12-04 12:33:39 +00:00
|
|
|
case PERF_ENABLE_URING:
|
2020-07-03 16:14:58 +00:00
|
|
|
#ifndef SPDK_CONFIG_URING
|
|
|
|
fprintf(stderr, "%s must be rebuilt with CONFIG_URING=y for -R flag.\n",
|
|
|
|
argv[0]);
|
|
|
|
usage(argv[0]);
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
g_use_uring = true;
|
|
|
|
break;
|
		case PERF_LOG_FLAG:
			rc = spdk_log_set_flag(optarg);
			if (rc < 0) {
				fprintf(stderr, "unknown flag\n");
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}
#ifdef DEBUG
			spdk_log_set_print_level(SPDK_LOG_DEBUG);
#endif
			break;
		case PERF_ENABLE_VMD:
			g_vmd = true;
			break;
		case PERF_DISABLE_KTLS:
			ssl_used = true;
			perf_set_sock_opts("ssl", "ktls", 0, NULL);
			break;
		case PERF_ENABLE_KTLS:
			ssl_used = true;
			perf_set_sock_opts("ssl", "ktls", 1, NULL);
			break;
		case PERF_TLS_VERSION:
			ssl_used = true;
			val = spdk_strtol(optarg, 10);
			if (val < 0) {
				fprintf(stderr, "Illegal tls version value %s\n", optarg);
				return 1;
			}
			perf_set_sock_opts("ssl", "tls_version", val, NULL);
			break;
		case PERF_PSK_KEY:
			ssl_used = true;
			perf_set_sock_opts("ssl", "psk_key", 0, optarg);
			break;
		case PERF_PSK_IDENTITY:
			ssl_used = true;
			perf_set_sock_opts("ssl", "psk_identity", 0, optarg);
			break;
		case PERF_DISABLE_ZCOPY:
			perf_set_sock_opts(optarg, "enable_zerocopy_send_client", 0, NULL);
			break;
		case PERF_ENABLE_ZCOPY:
			perf_set_sock_opts(optarg, "enable_zerocopy_send_client", 1, NULL);
			break;
		case PERF_USE_EVERY_CORE:
			g_use_every_core = true;
			break;
		case PERF_DEFAULT_SOCK_IMPL:
			sock_impl = optarg;
			rc = spdk_sock_set_default_impl(optarg);
			if (rc) {
				fprintf(stderr, "Failed to set sock impl %s, err %d (%s)\n", optarg, errno, strerror(errno));
				return 1;
			}
			break;
		case PERF_TRANSPORT_STATISTICS:
			g_dump_transport_stats = true;
			break;
		case PERF_IOVA_MODE:
			env_opts->iova_mode = optarg;
			break;
		case PERF_SOCK_IMPL:
			g_sock_threshold_impl = optarg;
			break;
		case PERF_TRANSPORT_TOS:
			val = spdk_strtol(optarg, 10);
			if (val < 0) {
				fprintf(stderr, "Invalid TOS value\n");
				return 1;
			}
			g_transport_tos = val;
			break;
		default:
			usage(argv[0]);
			return 1;
		}
	}
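
	/*
	 * Post-parse sanity checks. -q, -o, -w and -t are mandatory, so a
	 * minimal invocation looks roughly like the following (values are
	 * illustrative only):
	 *
	 *   perf --io-depth 128 --io-size 4096 --io-pattern randread --time 60
	 */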
	if (!g_nr_io_queues_per_ns) {
		usage(argv[0]);
		return 1;
	}

	if (!g_queue_depth) {
		fprintf(stderr, "missing -q (--io-depth) operand\n");
		usage(argv[0]);
		return 1;
	}
	if (!g_io_size_bytes) {
		fprintf(stderr, "missing -o (--io-size) operand\n");
		usage(argv[0]);
		return 1;
	}
	if (!g_io_unit_size || g_io_unit_size % 4) {
		fprintf(stderr, "io unit size must be nonzero and a multiple of 4 bytes\n");
		return 1;
	}
	if (!g_workload_type) {
		fprintf(stderr, "missing -w (--io-pattern) operand\n");
		usage(argv[0]);
		return 1;
	}
	if (!g_time_in_sec) {
		fprintf(stderr, "missing -t (--time) operand\n");
		usage(argv[0]);
		return 1;
	}
	if (!g_quiet_count) {
		fprintf(stderr, "-Q (--skip-errors) value must be greater than 0\n");
		usage(argv[0]);
		return 1;
	}

	if (strncmp(g_workload_type, "rand", 4) == 0) {
		g_is_random = 1;
		g_workload_type = &g_workload_type[4];
	}

	if (ssl_used && strncmp(sock_impl, "ssl", 3) != 0) {
		fprintf(stderr, "sock impl is not SSL but one of the SSL-only options was used\n");
		usage(argv[0]);
		return 1;
	}

	if (strcmp(g_workload_type, "read") == 0 || strcmp(g_workload_type, "write") == 0) {
		g_rw_percentage = strcmp(g_workload_type, "read") == 0 ? 100 : 0;
		if (g_mix_specified) {
			fprintf(stderr, "Ignoring -M (--rwmixread) option... Please use -M option"
				" only when using rw or randrw.\n");
		}
	} else if (strcmp(g_workload_type, "rw") == 0) {
		if (g_rw_percentage < 0 || g_rw_percentage > 100) {
			fprintf(stderr,
				"-M (--rwmixread) must be set to a value from 0 to 100 "
				"for rw or randrw.\n");
			return 1;
		}
	} else {
		fprintf(stderr,
			"-w (--io-pattern) io pattern type must be one of\n"
			"(read, write, randread, randwrite, rw, randrw)\n");
		return 1;
	}

	if (g_sock_zcopy_threshold > 0) {
		if (!g_sock_threshold_impl) {
			fprintf(stderr,
				"--zerocopy-threshold must be set together with a sock implementation (--zerocopy-threshold-sock-impl <impl>)\n");
			return 1;
		}

		perf_set_sock_opts(g_sock_threshold_impl, "zerocopy_threshold", g_sock_zcopy_threshold, NULL);
	}

	if (g_number_ios && g_warmup_time_in_sec) {
		fprintf(stderr, "-d (--number-ios) with -a (--warmup-time) is not supported\n");
		return 1;
	}

	if (g_number_ios && g_number_ios < g_queue_depth) {
		fprintf(stderr, "-d (--number-ios) less than -q (--io-depth) is not supported\n");
		return 1;
	}

	if (g_rdma_srq_size != 0) {
		struct spdk_nvme_transport_opts opts;

		spdk_nvme_transport_get_opts(&opts, sizeof(opts));
		opts.rdma_srq_size = g_rdma_srq_size;

		rc = spdk_nvme_transport_set_opts(&opts, sizeof(opts));
		if (rc != 0) {
			fprintf(stderr, "Failed to set NVMe transport options.\n");
			return 1;
		}
	}

	if (TAILQ_EMPTY(&g_trid_list)) {
		/* If no transport IDs were specified, default to enumerating all local PCIe devices */
		add_trid("trtype:PCIe");
	} else {
		struct trid_entry *trid_entry, *trid_entry_tmp;

		env_opts->no_pci = true;
		/* check whether any of the specified transport IDs is a local PCIe one */
		TAILQ_FOREACH_SAFE(trid_entry, &g_trid_list, tailq, trid_entry_tmp) {
			if (trid_entry->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
				env_opts->no_pci = false;
				break;
			}
		}
	}

	g_file_optind = optind;

	return 0;
}
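
/*
 * Create one worker_thread per core in the SPDK core mask. The worker
 * structures are only bookkeeping here; the actual per-core threads are
 * launched later from main() via spdk_env_thread_launch_pinned().
 */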
static int
register_workers(void)
{
	uint32_t i;
	struct worker_thread *worker;

	SPDK_ENV_FOREACH_CORE(i) {
		worker = calloc(1, sizeof(*worker));
		if (worker == NULL) {
			fprintf(stderr, "Unable to allocate worker\n");
			return -1;
		}

		TAILQ_INIT(&worker->ns_ctx);
		worker->lcore = i;
		TAILQ_INSERT_TAIL(&g_workers, worker, link);
		g_num_workers++;
	}

	return 0;
}
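
/*
 * Tear-down mirror of register_workers()/associate_workers_with_ns(): free
 * each worker's per-namespace contexts (including their histograms) before
 * freeing the worker itself.
 */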
static void
unregister_workers(void)
{
	struct worker_thread *worker, *tmp_worker;
	struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;

	/* Free namespace context and worker thread */
	TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
		TAILQ_REMOVE(&g_workers, worker, link);

		TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
			TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
			spdk_histogram_data_free(ns_ctx->histogram);
			free(ns_ctx);
		}

		free(worker);
	}
}
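
/*
 * Probe callback: invoked for every controller found during
 * spdk_nvme_probe(). Returning false skips the controller; returning true
 * lets the attach proceed with the (possibly adjusted) controller options.
 */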
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	struct trid_entry *trid_entry = cb_ctx;

	if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		if (g_disable_sq_cmb) {
			opts->use_cmb_sqs = false;
		}
		if (g_no_shn_notification) {
			opts->no_shn_notification = true;
		}
	}

	if (trid->trtype != trid_entry->trid.trtype &&
	    strcasecmp(trid->trstring, trid_entry->trid.trstring)) {
		return false;
	}

	opts->io_queue_size = g_io_queue_size;

	/* Set the header and data_digest */
	opts->header_digest = g_header_digest;
	opts->data_digest = g_data_digest;
	opts->keep_alive_timeout_ms = g_keep_alive_timeout_in_ms;
	memcpy(opts->hostnqn, trid_entry->hostnqn, sizeof(opts->hostnqn));

	opts->transport_tos = g_transport_tos;

	return true;
}
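
/*
 * Attach callback: report where the controller was attached (PCI vendor and
 * device IDs for local devices, traddr/trsvcid/subnqn for fabrics) and hand
 * it over to register_ctrlr().
 */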
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	struct trid_entry *trid_entry = cb_ctx;
	struct spdk_pci_addr pci_addr;
	struct spdk_pci_device *pci_dev;
	struct spdk_pci_id pci_id;

	if (trid->trtype != SPDK_NVME_TRANSPORT_PCIE) {
		printf("Attached to NVMe over Fabrics controller at %s:%s: %s\n",
		       trid->traddr, trid->trsvcid,
		       trid->subnqn);
	} else {
		if (spdk_pci_addr_parse(&pci_addr, trid->traddr)) {
			return;
		}

		pci_dev = spdk_nvme_ctrlr_get_pci_device(ctrlr);
		if (!pci_dev) {
			return;
		}

		pci_id = spdk_pci_device_get_id(pci_dev);

		printf("Attached to NVMe Controller at %s [%04x:%04x]\n",
		       trid->traddr,
		       pci_id.vendor_id, pci_id.device_id);
	}

	register_ctrlr(ctrlr, trid_entry);
}
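
/*
 * Walk the user-supplied transport IDs and probe each one. VMD
 * initialization is best-effort: a failure only warns, since it merely
 * leaves NVMe devices behind a VMD domain undiscovered.
 */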
static int
register_controllers(void)
{
	struct trid_entry *trid_entry;

	printf("Initializing NVMe Controllers\n");

	if (g_vmd && spdk_vmd_init()) {
		fprintf(stderr, "Failed to initialize VMD."
			" Some NVMe devices may be unavailable.\n");
	}

	TAILQ_FOREACH(trid_entry, &g_trid_list, tailq) {
		if (spdk_nvme_probe(&trid_entry->trid, trid_entry, probe_cb, attach_cb, NULL) != 0) {
			fprintf(stderr, "spdk_nvme_probe() failed for transport address '%s'\n",
				trid_entry->trid.traddr);
			return -1;
		}
	}

	return 0;
}
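
/*
 * Detach controllers asynchronously and reap them with a single
 * spdk_nvme_detach_poll() at the end, so multiple controllers can be
 * detached in parallel rather than one at a time.
 */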
static void
unregister_controllers(void)
{
	struct ctrlr_entry *entry, *tmp;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
		TAILQ_REMOVE(&g_controllers, entry, link);

		spdk_dma_free(entry->latency_page);
		if (g_latency_ssd_tracking_enable &&
		    spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
			set_latency_tracking_feature(entry->ctrlr, false);
		}

		if (g_nr_unused_io_queues) {
			int i;

			for (i = 0; i < g_nr_unused_io_queues; i++) {
				spdk_nvme_ctrlr_free_io_qpair(entry->unused_qpairs[i]);
			}

			free(entry->unused_qpairs);
		}

		spdk_nvme_detach_async(entry->ctrlr, &detach_ctx);
		free(entry);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}

	if (g_vmd) {
		spdk_vmd_fini();
	}
}
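
/*
 * Bind one namespace to one worker: allocate a per-(namespace, worker) I/O
 * context and queue it on the worker's ns_ctx list.
 */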
static int
allocate_ns_worker(struct ns_entry *entry, struct worker_thread *worker)
{
	struct ns_worker_ctx *ns_ctx;

	ns_ctx = calloc(1, sizeof(struct ns_worker_ctx));
	if (!ns_ctx) {
		return -1;
	}

	printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
	ns_ctx->stats.min_tsc = UINT64_MAX;
	ns_ctx->entry = entry;
	ns_ctx->histogram = spdk_histogram_data_alloc();
	TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);

	return 0;
}
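
/*
 * Without --use-every-core, workers and namespaces are paired round-robin
 * over max(num_workers, num_namespaces) iterations, so the longer of the
 * two lists wraps around the shorter one.
 */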
static int
associate_workers_with_ns(void)
{
	struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
	struct worker_thread *worker = TAILQ_FIRST(&g_workers);
	int i, count;

	/* Each core runs a single worker, and namespaces are associated as follows:
	 * --use-every-core not specified (default):
	 * 1) equal workers and namespaces - each worker is associated with a single namespace
	 * 2) more workers than namespaces - each namespace is associated with one or more workers
	 * 3) more namespaces than workers - each worker is associated with one or more namespaces
	 * --use-every-core option enabled - every worker is associated with all namespaces
	 */
	if (g_use_every_core) {
		TAILQ_FOREACH(worker, &g_workers, link) {
			TAILQ_FOREACH(entry, &g_namespaces, link) {
				if (allocate_ns_worker(entry, worker) != 0) {
					return -1;
				}
			}
		}
		return 0;
	}

	count = g_num_namespaces > g_num_workers ? g_num_namespaces : g_num_workers;

	for (i = 0; i < count; i++) {
		if (entry == NULL) {
			break;
		}

		if (allocate_ns_worker(entry, worker) != 0) {
			return -1;
		}

		worker = TAILQ_NEXT(worker, link);
		if (worker == NULL) {
			worker = TAILQ_FIRST(&g_workers);
		}

		entry = TAILQ_NEXT(entry, link);
		if (entry == NULL) {
			entry = TAILQ_FIRST(&g_namespaces);
		}
	}

	return 0;
}
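
/*
 * Background admin-queue poller for fabrics controllers (PCIe controllers
 * are skipped). Cancellation is kept disabled while walking the controller
 * list so the thread can never be cancelled mid-poll; sleep(1) is the
 * designated cancellation point.
 */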
static void *
nvme_poll_ctrlrs(void *arg)
{
	struct ctrlr_entry *entry;
	int oldstate;
	int rc;

	spdk_unaffinitize_thread();

	while (true) {
		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);

		TAILQ_FOREACH(entry, &g_controllers, link) {
			if (entry->trtype != SPDK_NVME_TRANSPORT_PCIE) {
				rc = spdk_nvme_ctrlr_process_admin_completions(entry->ctrlr);
				if (spdk_unlikely(rc < 0 && !g_exit)) {
					g_exit = true;
				}
			}
		}

		pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);

		/* This is a pthread cancellation point and cannot be removed. */
		sleep(1);
	}

	return NULL;
}

static void
sig_handler(int signo)
{
	g_exit = true;
}

static int
setup_sig_handlers(void)
{
	struct sigaction sigact = {};
	int rc;

	sigemptyset(&sigact.sa_mask);
	sigact.sa_handler = sig_handler;
	rc = sigaction(SIGINT, &sigact, NULL);
	if (rc < 0) {
		fprintf(stderr, "sigaction(SIGINT) failed, errno %d (%s)\n", errno, strerror(errno));
		return -1;
	}

	rc = sigaction(SIGTERM, &sigact, NULL);
	if (rc < 0) {
		fprintf(stderr, "sigaction(SIGTERM) failed, errno %d (%s)\n", errno, strerror(errno));
		return -1;
	}

	return 0;
}
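
/*
 * main() in outline: seed rand -> parse args -> init SPDK env -> install
 * signal handlers -> register workers, files and controllers -> pair
 * namespaces with workers -> run work_fn() on every core (the main core
 * runs it inline) -> print stats -> unwind everything under cleanup:.
 */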
int
main(int argc, char **argv)
{
	int rc;
	struct worker_thread *worker, *main_worker;
	struct spdk_env_opts opts;
	pthread_t thread_id = 0;

	/* Use the runtime PID to set the random seed */
	srand(getpid());

	spdk_env_opts_init(&opts);
	opts.name = "perf";
	opts.pci_allowed = g_allowed_pci_addr;
	rc = parse_args(argc, argv, &opts);
	if (rc != 0) {
		return rc;
	}
	/* Transport statistics are printed from each thread.
	 * To avoid garbled terminal output, initialize and use a mutex. */
	rc = pthread_mutex_init(&g_stats_mutex, NULL);
	if (rc != 0) {
		fprintf(stderr, "Failed to init mutex\n");
		return -1;
	}
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		unregister_trids();
		pthread_mutex_destroy(&g_stats_mutex);
		return -1;
	}

	rc = setup_sig_handlers();
	if (rc != 0) {
		rc = -1;
		goto cleanup;
	}

	g_tsc_rate = spdk_get_ticks_hz();

	if (register_workers() != 0) {
		rc = -1;
		goto cleanup;
	}

#if defined(HAVE_LIBAIO) || defined(SPDK_CONFIG_URING)
	if (register_files(argc, argv) != 0) {
		rc = -1;
		goto cleanup;
	}
#endif

	if (register_controllers() != 0) {
		rc = -1;
		goto cleanup;
	}

	if (g_warn) {
		printf("WARNING: Some requested NVMe devices were skipped\n");
	}

	if (g_num_namespaces == 0) {
		fprintf(stderr, "No valid NVMe controllers or AIO or URING devices found\n");
		goto cleanup;
	}

	if (g_num_workers > 1 && g_quiet_count > 1) {
		fprintf(stderr, "Error message rate-limiting enabled across multiple threads.\n");
		fprintf(stderr, "Error suppression count may not be exact.\n");
	}

	rc = pthread_create(&thread_id, NULL, &nvme_poll_ctrlrs, NULL);
	if (rc != 0) {
		fprintf(stderr, "Unable to spawn a thread to poll admin queues.\n");
		goto cleanup;
	}

	if (associate_workers_with_ns() != 0) {
		rc = -1;
		goto cleanup;
	}

	rc = pthread_barrier_init(&g_worker_sync_barrier, NULL, g_num_workers);
	if (rc != 0) {
		fprintf(stderr, "Unable to initialize thread sync barrier\n");
		goto cleanup;
	}

	printf("Initialization complete. Launching workers.\n");

	/* Launch all of the secondary workers */
	g_main_core = spdk_env_get_current_core();
	main_worker = NULL;
	TAILQ_FOREACH(worker, &g_workers, link) {
		if (worker->lcore != g_main_core) {
			spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
		} else {
			assert(main_worker == NULL);
			main_worker = worker;
		}
	}

	assert(main_worker != NULL);
	rc = work_fn(main_worker);

	spdk_env_thread_wait_all();

	print_stats();

	pthread_barrier_destroy(&g_worker_sync_barrier);

cleanup:
	if (thread_id && pthread_cancel(thread_id) == 0) {
		pthread_join(thread_id, NULL);
	}
	unregister_trids();
	unregister_namespaces();
	unregister_controllers();
	unregister_workers();

	spdk_env_fini();

	pthread_mutex_destroy(&g_stats_mutex);

	if (rc != 0) {
		fprintf(stderr, "%s: errors occurred\n", argv[0]);
	}

	return rc;
}