/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/bdev_zone.h"
#include "spdk/accel_engine.h"
#include "spdk/env.h"
#include "spdk/init.h"
#include "spdk/thread.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/queue.h"
#include "spdk/util.h"

#include "spdk_internal/event.h"

#include "config-host.h"
#include "fio.h"
#include "optgroup.h"

#ifdef for_each_rw_ddir
#define FIO_HAS_ZBD (FIO_IOOPS_VERSION >= 26)
#else
#define FIO_HAS_ZBD (0)
#endif

/* FreeBSD is missing CLOCK_MONOTONIC_RAW,
 * so alternative is provided. */
#ifndef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif
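
/*
 * Illustrative usage (the paths and names below are placeholders, not part of
 * this file): load the plugin into fio and point --filename at a bdev defined
 * in the JSON configuration, for example
 *
 *   LD_PRELOAD=<path to fio_plugin> fio --name=job1 --ioengine=spdk_bdev \
 *       --spdk_json_conf=<path to bdev.json> --thread=1 --filename=<bdev name>
 *
 * thread=1 is mandatory (enforced in spdk_fio_setup()), and --filename='*'
 * selects every registered bdev as a target.
 */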

struct spdk_fio_options {
	void *pad;
	char *conf;
	char *json_conf;
	char *log_flags;
	unsigned mem_mb;
	int mem_single_seg;
	int initial_zone_reset;
	int zone_append;
};

struct spdk_fio_request {
	struct io_u *io;
	struct thread_data *td;
};

struct spdk_fio_target {
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	bool zone_append_enabled;

	TAILQ_ENTRY(spdk_fio_target) link;
};

struct spdk_fio_thread {
	struct thread_data *td; /* fio thread context */
	struct spdk_thread *thread; /* spdk thread context */

	TAILQ_HEAD(, spdk_fio_target) targets;
	bool failed; /* true if the thread failed to initialize */

	struct io_u **iocq; /* io completion queue */
	unsigned int iocq_count; /* number of iocq entries filled by last getevents */
	unsigned int iocq_size; /* number of iocq entries allocated */

	TAILQ_ENTRY(spdk_fio_thread) link;
};

struct spdk_fio_zone_cb_arg {
	struct spdk_fio_target *target;
	struct spdk_bdev_zone_info *spdk_zones;
	int completed;
	uint64_t offset_blocks;
	struct zbd_zone *fio_zones;
	unsigned int nr_zones;
};

static bool g_spdk_env_initialized = false;
static const char *g_json_config_file = NULL;

static int spdk_fio_init(struct thread_data *td);
static void spdk_fio_cleanup(struct thread_data *td);
static size_t spdk_fio_poll_thread(struct spdk_fio_thread *fio_thread);
static int spdk_fio_handle_options(struct thread_data *td, struct fio_file *f,
				   struct spdk_bdev *bdev);
static int spdk_fio_handle_options_per_target(struct thread_data *td, struct fio_file *f);
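
/*
 * State shared with the background initialization thread: the pthread that runs
 * spdk_init_thread_poll(), the mutex/condition variable used to synchronize with
 * it, the flag that keeps its poll loop running, and the list of exiting fio
 * threads it keeps polling until they can be destroyed.
 */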
static pthread_t g_init_thread_id = 0;
static pthread_mutex_t g_init_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_init_cond;
static bool g_poll_loop = true;
static TAILQ_HEAD(, spdk_fio_thread) g_threads = TAILQ_HEAD_INITIALIZER(g_threads);

/* Default polling timeout (ns) */
#define SPDK_FIO_POLLING_TIMEOUT 1000000000ULL

static int
spdk_fio_init_thread(struct thread_data *td)
{
	struct spdk_fio_thread *fio_thread;

	fio_thread = calloc(1, sizeof(*fio_thread));
	if (!fio_thread) {
		SPDK_ERRLOG("failed to allocate thread local context\n");
		return -1;
	}

	fio_thread->td = td;
	td->io_ops_data = fio_thread;

	fio_thread->thread = spdk_thread_create("fio_thread", NULL);
	if (!fio_thread->thread) {
		free(fio_thread);
		SPDK_ERRLOG("failed to allocate thread\n");
		return -1;
	}
	spdk_set_thread(fio_thread->thread);

	fio_thread->iocq_size = td->o.iodepth;
	fio_thread->iocq = calloc(fio_thread->iocq_size, sizeof(struct io_u *));
	assert(fio_thread->iocq != NULL);

	TAILQ_INIT(&fio_thread->targets);

	return 0;
}

static void
spdk_fio_bdev_close_targets(void *arg)
{
	struct spdk_fio_thread *fio_thread = arg;
	struct spdk_fio_target *target, *tmp;

	TAILQ_FOREACH_SAFE(target, &fio_thread->targets, link, tmp) {
		TAILQ_REMOVE(&fio_thread->targets, target, link);
		spdk_put_io_channel(target->ch);
		spdk_bdev_close(target->desc);
		free(target);
	}
}

static void
spdk_fio_cleanup_thread(struct spdk_fio_thread *fio_thread)
{
	spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_close_targets, fio_thread);

	pthread_mutex_lock(&g_init_mtx);
	TAILQ_INSERT_TAIL(&g_threads, fio_thread, link);
	pthread_mutex_unlock(&g_init_mtx);
}
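
/*
 * Convert the expiration time of the next timed poller on this SPDK thread into
 * an absolute timespec for pthread_cond_timedwait(). If no poller is pending,
 * sleep for at most SPDK_FIO_POLLING_TIMEOUT nanoseconds; if a poller is already
 * active, *ts is left at the current time so the wait returns immediately.
 */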
static void
spdk_fio_calc_timeout(struct spdk_fio_thread *fio_thread, struct timespec *ts)
{
	uint64_t timeout, now;

	if (spdk_thread_has_active_pollers(fio_thread->thread)) {
		return;
	}

	timeout = spdk_thread_next_poller_expiration(fio_thread->thread);
	now = spdk_get_ticks();

	if (timeout == 0) {
		timeout = now + (SPDK_FIO_POLLING_TIMEOUT * spdk_get_ticks_hz()) / SPDK_SEC_TO_NSEC;
	}

	if (timeout > now) {
		timeout = ((timeout - now) * SPDK_SEC_TO_NSEC) / spdk_get_ticks_hz() +
			  ts->tv_sec * SPDK_SEC_TO_NSEC + ts->tv_nsec;

		ts->tv_sec = timeout / SPDK_SEC_TO_NSEC;
		ts->tv_nsec = timeout % SPDK_SEC_TO_NSEC;
	}
}

static void
spdk_fio_bdev_init_done(int rc, void *cb_arg)
{
	*(bool *)cb_arg = true;
}

static void
spdk_fio_bdev_init_start(void *arg)
{
	bool *done = arg;

	spdk_subsystem_init_from_json_config(g_json_config_file, SPDK_DEFAULT_RPC_ADDR,
					     spdk_fio_bdev_init_done, done, true);
}

static void
spdk_fio_bdev_fini_done(void *cb_arg)
{
	*(bool *)cb_arg = true;
}

static void
spdk_fio_bdev_fini_start(void *arg)
{
	bool *done = arg;

	spdk_subsystem_fini(spdk_fio_bdev_fini_done, done);
}
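
/*
 * Entry point of the background initialization pthread. It initializes the SPDK
 * environment and the bdev layer on a temporary SPDK thread, signals
 * spdk_fio_init_env() once that is done, then keeps polling that thread (and any
 * exiting fio threads queued on g_threads) until spdk_fio_finish_env() clears
 * g_poll_loop, at which point it tears down the bdev layer and the remaining
 * SPDK threads.
 */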
static void *
spdk_init_thread_poll(void *arg)
{
	struct spdk_fio_options *eo = arg;
	struct spdk_fio_thread *fio_thread;
	struct spdk_fio_thread *thread, *tmp;
	struct spdk_env_opts opts;
	bool done;
	int rc;
	struct timespec ts;
	struct thread_data td = {};

	/* Create a dummy thread data for use on the initialization thread. */
	td.o.iodepth = 32;
	td.eo = eo;

	/* Parse the SPDK configuration file */
	eo = arg;

	if (eo->conf && eo->json_conf) {
		SPDK_ERRLOG("Cannot provide two types of configuration files\n");
		rc = EINVAL;
		goto err_exit;
	} else if (eo->conf && strlen(eo->conf)) {
		g_json_config_file = eo->conf;
	} else if (eo->json_conf && strlen(eo->json_conf)) {
		g_json_config_file = eo->json_conf;
	} else {
		SPDK_ERRLOG("No configuration file provided\n");
		rc = EINVAL;
		goto err_exit;
	}

	/* Initialize the environment library */
	spdk_env_opts_init(&opts);
	opts.name = "fio";

	if (eo->mem_mb) {
		opts.mem_size = eo->mem_mb;
	}
	opts.hugepage_single_segments = eo->mem_single_seg;

	if (spdk_env_init(&opts) < 0) {
		SPDK_ERRLOG("Unable to initialize SPDK env\n");
		rc = EINVAL;
		goto err_exit;
	}
	spdk_unaffinitize_thread();

	if (eo->log_flags) {
		char *tok = strtok(eo->log_flags, ",");
		do {
			rc = spdk_log_set_flag(tok);
			if (rc < 0) {
				SPDK_ERRLOG("unknown spdk log flag %s\n", tok);
				rc = EINVAL;
				goto err_exit;
			}
		} while ((tok = strtok(NULL, ",")) != NULL);
#ifdef DEBUG
		spdk_log_set_print_level(SPDK_LOG_DEBUG);
#endif
	}

	spdk_thread_lib_init(NULL, 0);

	/* Create an SPDK thread temporarily */
	rc = spdk_fio_init_thread(&td);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to create initialization thread\n");
		goto err_exit;
	}

	fio_thread = td.io_ops_data;

	/* Initialize the bdev layer */
	done = false;
	spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_init_start, &done);

	do {
		spdk_fio_poll_thread(fio_thread);
	} while (!done);

	/*
	 * Continue polling until there are no more events.
	 * This handles any final events posted by pollers.
	 */
	while (spdk_fio_poll_thread(fio_thread) > 0) {};

	/* Set condition variable */
	pthread_mutex_lock(&g_init_mtx);
	pthread_cond_signal(&g_init_cond);
	pthread_mutex_unlock(&g_init_mtx);

	while (g_poll_loop) {
		spdk_fio_poll_thread(fio_thread);

		pthread_mutex_lock(&g_init_mtx);
		if (!TAILQ_EMPTY(&g_threads)) {
			TAILQ_FOREACH_SAFE(thread, &g_threads, link, tmp) {
				spdk_fio_poll_thread(thread);
			}

			/* If there are exiting threads to poll, don't sleep. */
			pthread_mutex_unlock(&g_init_mtx);
			continue;
		}

		/* Figure out how long to sleep. */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		spdk_fio_calc_timeout(fio_thread, &ts);

		rc = pthread_cond_timedwait(&g_init_cond, &g_init_mtx, &ts);
		pthread_mutex_unlock(&g_init_mtx);

		if (rc != ETIMEDOUT) {
			break;
		}
	}

	spdk_fio_cleanup_thread(fio_thread);

	/* Finalize the bdev layer */
	done = false;
	spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_fini_start, &done);

	do {
		spdk_fio_poll_thread(fio_thread);

		TAILQ_FOREACH_SAFE(thread, &g_threads, link, tmp) {
			spdk_fio_poll_thread(thread);
		}
	} while (!done);

	/* Now exit all the threads */
	TAILQ_FOREACH(thread, &g_threads, link) {
		spdk_set_thread(thread->thread);
		spdk_thread_exit(thread->thread);
		spdk_set_thread(NULL);
	}

	/* And wait for them to gracefully exit */
	while (!TAILQ_EMPTY(&g_threads)) {
		TAILQ_FOREACH_SAFE(thread, &g_threads, link, tmp) {
			if (spdk_thread_is_exited(thread->thread)) {
				TAILQ_REMOVE(&g_threads, thread, link);
				spdk_thread_destroy(thread->thread);
				free(thread->iocq);
				free(thread);
			} else {
				spdk_thread_poll(thread->thread, 0, 0);
			}
		}
	}

	pthread_exit(NULL);

err_exit:
	exit(rc);
	return NULL;
}

static int
spdk_fio_init_env(struct thread_data *td)
{
	pthread_condattr_t attr;
	int rc = -1;

	if (pthread_condattr_init(&attr)) {
		SPDK_ERRLOG("Unable to initialize condition variable\n");
		return -1;
	}

	if (pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)) {
		SPDK_ERRLOG("Unable to initialize condition variable\n");
		goto out;
	}

	if (pthread_cond_init(&g_init_cond, &attr)) {
		SPDK_ERRLOG("Unable to initialize condition variable\n");
		goto out;
	}

	/*
	 * Spawn a thread to handle initialization operations and to poll things
	 * like the admin queues periodically.
	 */
	rc = pthread_create(&g_init_thread_id, NULL, &spdk_init_thread_poll, td->eo);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to spawn thread to poll admin queue. It won't be polled.\n");
	}

	/* Wait for background thread to advance past the initialization */
	pthread_mutex_lock(&g_init_mtx);
	pthread_cond_wait(&g_init_cond, &g_init_mtx);
	pthread_mutex_unlock(&g_init_mtx);
out:
	pthread_condattr_destroy(&attr);
	return rc;
}
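
/*
 * Check whether both stdout (fd 1) and stderr (fd 2) point at /dev/null by
 * resolving the /proc/self/fd symlinks. Used below to refuse to run in fio
 * server (daemon) mode when those descriptors were not redirected.
 */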
static bool
fio_redirected_to_dev_null(void)
{
	char path[PATH_MAX] = "";
	ssize_t ret;

	ret = readlink("/proc/self/fd/1", path, sizeof(path));

	if (ret == -1 || strcmp(path, "/dev/null") != 0) {
		return false;
	}

	ret = readlink("/proc/self/fd/2", path, sizeof(path));

	if (ret == -1 || strcmp(path, "/dev/null") != 0) {
		return false;
	}

	return true;
}

/* Called for each thread to fill in the 'real_file_size' member for
 * each file associated with this thread. This is called prior to
 * the init operation (spdk_fio_init()) below. This call will occur
 * on the initial start up thread if 'create_serialize' is true, or
 * on the thread actually associated with 'thread_data' if 'create_serialize'
 * is false.
 */
static int
spdk_fio_setup(struct thread_data *td)
{
	unsigned int i;
	struct fio_file *f;

	/*
	 * If we're running in a daemonized FIO instance, it's possible
	 * fd 1/2 were re-used for something important by FIO. Newer fio
	 * versions are careful to redirect those to /dev/null, but if we're
	 * not, we'll abort early, so we don't accidentally write messages to
	 * an important file, etc.
	 */
	if (is_backend && !fio_redirected_to_dev_null()) {
		char buf[1024];
		snprintf(buf, sizeof(buf),
			 "SPDK FIO plugin is in daemon mode, but stdout/stderr "
			 "aren't redirected to /dev/null. Aborting.");
		fio_server_text_output(FIO_LOG_ERR, buf, sizeof(buf));
		return -1;
	}

	if (!td->o.use_thread) {
		SPDK_ERRLOG("must set thread=1 when using spdk plugin\n");
		return -1;
	}

	if (!g_spdk_env_initialized) {
		if (spdk_fio_init_env(td)) {
			SPDK_ERRLOG("failed to initialize\n");
			return -1;
		}

		g_spdk_env_initialized = true;
	}

	if (td->o.nr_files == 1 && strcmp(td->files[0]->file_name, "*") == 0) {
		struct spdk_bdev *bdev;

		/* add all available bdevs as fio targets */
		for (bdev = spdk_bdev_first_leaf(); bdev; bdev = spdk_bdev_next_leaf(bdev)) {
			add_file(td, spdk_bdev_get_name(bdev), 0, 1);
		}
	}

	for_each_file(td, f, i) {
		struct spdk_bdev *bdev;
		int rc;

		if (strcmp(f->file_name, "*") == 0) {
			continue;
		}

		bdev = spdk_bdev_get_by_name(f->file_name);
		if (!bdev) {
			SPDK_ERRLOG("Unable to find bdev with name %s\n", f->file_name);
			return -1;
		}

		f->real_file_size = spdk_bdev_get_num_blocks(bdev) *
				    spdk_bdev_get_block_size(bdev);
		f->filetype = FIO_TYPE_BLOCK;
		fio_file_set_size_known(f);

		rc = spdk_fio_handle_options(td, f, bdev);
		if (rc) {
			return rc;
		}
	}

	return 0;
}
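
/*
 * spdk_fio_bdev_open() runs on this fio thread's SPDK thread (sent via
 * spdk_thread_send_msg() from spdk_fio_init()). For every file it opens the
 * matching bdev, grabs an I/O channel, applies the per-target options, and adds
 * the target to fio_thread->targets; on any failure it sets fio_thread->failed.
 * fio_bdev_event_cb() is the (currently unhandled) event callback required by
 * spdk_bdev_open_ext().
 */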

static void
fio_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
		  void *event_ctx)
{
	SPDK_WARNLOG("Unsupported bdev event: type %d\n", type);
}

static void
spdk_fio_bdev_open(void *arg)
{
	struct thread_data *td = arg;
	struct spdk_fio_thread *fio_thread;
	unsigned int i;
	struct fio_file *f;
	int rc;

	fio_thread = td->io_ops_data;

	for_each_file(td, f, i) {
		struct spdk_fio_target *target;

		if (strcmp(f->file_name, "*") == 0) {
			continue;
		}

		target = calloc(1, sizeof(*target));
		if (!target) {
			SPDK_ERRLOG("Unable to allocate memory for I/O target.\n");
			fio_thread->failed = true;
			return;
		}

		rc = spdk_bdev_open_ext(f->file_name, true, fio_bdev_event_cb, NULL,
					&target->desc);
		if (rc) {
			SPDK_ERRLOG("Unable to open bdev %s\n", f->file_name);
			free(target);
			fio_thread->failed = true;
			return;
		}

		target->bdev = spdk_bdev_desc_get_bdev(target->desc);

		target->ch = spdk_bdev_get_io_channel(target->desc);
		if (!target->ch) {
			SPDK_ERRLOG("Unable to get I/O channel for bdev.\n");
			spdk_bdev_close(target->desc);
			free(target);
			fio_thread->failed = true;
			return;
		}

		f->engine_data = target;

		rc = spdk_fio_handle_options_per_target(td, f);
		if (rc) {
			SPDK_ERRLOG("Failed to handle options for: %s\n", f->file_name);
			f->engine_data = NULL;
			spdk_put_io_channel(target->ch);
			spdk_bdev_close(target->desc);
			free(target);
			fio_thread->failed = true;
			return;
		}

		TAILQ_INSERT_TAIL(&fio_thread->targets, target, link);
	}
}

/* Called for each thread, on that thread, shortly after the thread
 * starts.
 *
 * Also called by spdk_fio_report_zones(), since we need an I/O channel
 * in order to get the zone report. (fio calls the .report_zones callback
 * before it calls the .init callback.)
 * Therefore, if fio was run with --zonemode=zbd, the thread will already
 * be initialized by the time that fio calls the .init callback.
 */
static int
spdk_fio_init(struct thread_data *td)
{
	struct spdk_fio_thread *fio_thread;
	int rc;

	/* If thread has already been initialized, do nothing. */
	if (td->io_ops_data) {
		return 0;
	}

	rc = spdk_fio_init_thread(td);
	if (rc) {
		return rc;
	}

	fio_thread = td->io_ops_data;
	assert(fio_thread);
	fio_thread->failed = false;

	spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_open, td);

	while (spdk_fio_poll_thread(fio_thread) > 0) {}

	if (fio_thread->failed) {
		return -1;
	}

	return 0;
}

static void
spdk_fio_cleanup(struct thread_data *td)
{
	struct spdk_fio_thread *fio_thread = td->io_ops_data;

	spdk_fio_cleanup_thread(fio_thread);
	td->io_ops_data = NULL;
}

static int
spdk_fio_open(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int
spdk_fio_close(struct thread_data *td, struct fio_file *f)
{
	return 0;
}

static int
spdk_fio_iomem_alloc(struct thread_data *td, size_t total_mem)
{
	td->orig_buffer = spdk_dma_zmalloc(total_mem, 0x1000, NULL);
	return td->orig_buffer == NULL;
}

static void
spdk_fio_iomem_free(struct thread_data *td)
{
	spdk_dma_free(td->orig_buffer);
}

static int
spdk_fio_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct spdk_fio_request *fio_req;

	io_u->engine_data = NULL;

	fio_req = calloc(1, sizeof(*fio_req));
	if (fio_req == NULL) {
		return 1;
	}
	fio_req->io = io_u;
	fio_req->td = td;

	io_u->engine_data = fio_req;

	return 0;
}

static void
spdk_fio_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct spdk_fio_request *fio_req = io_u->engine_data;

	if (fio_req) {
		assert(fio_req->io == io_u);
		free(fio_req);
		io_u->engine_data = NULL;
	}
}

static void
spdk_fio_completion_cb(struct spdk_bdev_io *bdev_io,
		       bool success,
		       void *cb_arg)
{
	struct spdk_fio_request *fio_req = cb_arg;
	struct thread_data *td = fio_req->td;
	struct spdk_fio_thread *fio_thread = td->io_ops_data;

	assert(fio_thread->iocq_count < fio_thread->iocq_size);
	fio_req->io->error = success ? 0 : EIO;
	fio_thread->iocq[fio_thread->iocq_count++] = fio_req->io;

	spdk_bdev_free_io(bdev_io);
}

#if FIO_IOOPS_VERSION >= 24
typedef enum fio_q_status fio_q_status_t;
#else
typedef int fio_q_status_t;
#endif
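
/*
 * Translate a byte offset and length into the containing zone's start LBA and a
 * block count. The return value is non-zero if either the offset or the length
 * is not a multiple of the bdev block size.
 */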
static uint64_t
spdk_fio_zone_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t offset_bytes, uint64_t *zone_start,
			      uint64_t num_bytes, uint64_t *num_blocks)
{
	uint32_t block_size = spdk_bdev_get_block_size(bdev);

	*zone_start = spdk_bdev_get_zone_id(bdev, offset_bytes / block_size);
	*num_blocks = num_bytes / block_size;
	return (offset_bytes % block_size) | (num_bytes % block_size);
}

static fio_q_status_t
spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
{
	int rc = 1;
	struct spdk_fio_request *fio_req = io_u->engine_data;
	struct spdk_fio_target *target = io_u->file->engine_data;

	assert(fio_req->td == td);

	if (!target) {
		SPDK_ERRLOG("Unable to look up correct I/O target.\n");
		fio_req->io->error = ENODEV;
		return FIO_Q_COMPLETED;
	}

	switch (io_u->ddir) {
	case DDIR_READ:
		rc = spdk_bdev_read(target->desc, target->ch,
				    io_u->buf, io_u->offset, io_u->xfer_buflen,
				    spdk_fio_completion_cb, fio_req);
		break;
	case DDIR_WRITE:
		if (!target->zone_append_enabled) {
			rc = spdk_bdev_write(target->desc, target->ch,
					     io_u->buf, io_u->offset, io_u->xfer_buflen,
					     spdk_fio_completion_cb, fio_req);
		} else {
			uint64_t zone_start, num_blocks;

			if (spdk_fio_zone_bytes_to_blocks(target->bdev, io_u->offset, &zone_start,
							  io_u->xfer_buflen, &num_blocks) != 0) {
				rc = -EINVAL;
				break;
			}
			rc = spdk_bdev_zone_append(target->desc, target->ch, io_u->buf,
						   zone_start, num_blocks, spdk_fio_completion_cb,
						   fio_req);
		}
		break;
	case DDIR_TRIM:
		rc = spdk_bdev_unmap(target->desc, target->ch,
				     io_u->offset, io_u->xfer_buflen,
				     spdk_fio_completion_cb, fio_req);
		break;
	case DDIR_SYNC:
		rc = spdk_bdev_flush(target->desc, target->ch,
				     io_u->offset, io_u->xfer_buflen,
				     spdk_fio_completion_cb, fio_req);
		break;
	default:
		assert(false);
		break;
	}

	if (rc == -ENOMEM) {
		return FIO_Q_BUSY;
	}

	if (rc != 0) {
		fio_req->io->error = abs(rc);
		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}

static struct io_u *
spdk_fio_event(struct thread_data *td, int event)
{
	struct spdk_fio_thread *fio_thread = td->io_ops_data;

	assert(event >= 0);
	assert((unsigned)event < fio_thread->iocq_count);
	return fio_thread->iocq[event];
}

static size_t
spdk_fio_poll_thread(struct spdk_fio_thread *fio_thread)
{
	return spdk_thread_poll(fio_thread->thread, 0, 0);
}
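
/*
 * Busy-poll this thread's SPDK thread until at least 'min' completions have been
 * collected into the iocq array, or until the optional timeout 't' expires.
 */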
static int
spdk_fio_getevents(struct thread_data *td, unsigned int min,
		   unsigned int max, const struct timespec *t)
{
	struct spdk_fio_thread *fio_thread = td->io_ops_data;
	struct timespec t0, t1;
	uint64_t timeout = 0;

	if (t) {
		timeout = t->tv_sec * SPDK_SEC_TO_NSEC + t->tv_nsec;
		clock_gettime(CLOCK_MONOTONIC_RAW, &t0);
	}

	fio_thread->iocq_count = 0;

	for (;;) {
		spdk_fio_poll_thread(fio_thread);

		if (fio_thread->iocq_count >= min) {
			return fio_thread->iocq_count;
		}

		if (t) {
			clock_gettime(CLOCK_MONOTONIC_RAW, &t1);
			uint64_t elapse = ((t1.tv_sec - t0.tv_sec) * SPDK_SEC_TO_NSEC)
					  + t1.tv_nsec - t0.tv_nsec;
			if (elapse > timeout) {
				break;
			}
		}
	}

	return fio_thread->iocq_count;
}

static int
spdk_fio_invalidate(struct thread_data *td, struct fio_file *f)
{
	/* TODO: This should probably send a flush to the device, but for now just return successful. */
	return 0;
}

#if FIO_HAS_ZBD
static int
spdk_fio_get_zoned_model(struct thread_data *td, struct fio_file *f, enum zbd_zoned_model *model)
{
	struct spdk_bdev *bdev;

	if (f->filetype != FIO_TYPE_BLOCK) {
		SPDK_ERRLOG("Unsupported filetype: %d\n", f->filetype);
		return -EINVAL;
	}

	bdev = spdk_bdev_get_by_name(f->file_name);
	if (!bdev) {
		SPDK_ERRLOG("Cannot get zoned model, no bdev with name: %s\n", f->file_name);
		return -ENODEV;
	}

	if (spdk_bdev_is_zoned(bdev)) {
		*model = ZBD_HOST_MANAGED;
	} else {
		*model = ZBD_NONE;
	}

	return 0;
}

static void
spdk_fio_bdev_get_zone_info_done(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct spdk_fio_zone_cb_arg *cb_arg = arg;
	unsigned int i;
	int handled_zones = 0;

	if (!success) {
		spdk_bdev_free_io(bdev_io);
		cb_arg->completed = -EIO;
		return;
	}

	for (i = 0; i < cb_arg->nr_zones; i++) {
		struct spdk_bdev_zone_info *zone_src = &cb_arg->spdk_zones[handled_zones];
		struct zbd_zone *zone_dest = &cb_arg->fio_zones[handled_zones];
		uint32_t block_size = spdk_bdev_get_block_size(cb_arg->target->bdev);

		zone_dest->type = ZBD_ZONE_TYPE_SWR;
		zone_dest->len = spdk_bdev_get_zone_size(cb_arg->target->bdev) * block_size;
		zone_dest->capacity = zone_src->capacity * block_size;
		zone_dest->start = zone_src->zone_id * block_size;
		zone_dest->wp = zone_src->write_pointer * block_size;

		switch (zone_src->state) {
		case SPDK_BDEV_ZONE_STATE_EMPTY:
			zone_dest->cond = ZBD_ZONE_COND_EMPTY;
			break;
		case SPDK_BDEV_ZONE_STATE_IMP_OPEN:
			zone_dest->cond = ZBD_ZONE_COND_IMP_OPEN;
			break;
		case SPDK_BDEV_ZONE_STATE_EXP_OPEN:
			zone_dest->cond = ZBD_ZONE_COND_EXP_OPEN;
			break;
		case SPDK_BDEV_ZONE_STATE_FULL:
			zone_dest->cond = ZBD_ZONE_COND_FULL;
			break;
		case SPDK_BDEV_ZONE_STATE_CLOSED:
			zone_dest->cond = ZBD_ZONE_COND_CLOSED;
			break;
		case SPDK_BDEV_ZONE_STATE_READ_ONLY:
			zone_dest->cond = ZBD_ZONE_COND_READONLY;
			break;
		case SPDK_BDEV_ZONE_STATE_OFFLINE:
			zone_dest->cond = ZBD_ZONE_COND_OFFLINE;
			break;
		default:
			spdk_bdev_free_io(bdev_io);
			cb_arg->completed = -EIO;
			return;
		}
		handled_zones++;
	}

	spdk_bdev_free_io(bdev_io);
	cb_arg->completed = handled_zones;
}

static void
spdk_fio_bdev_get_zone_info(void *arg)
{
	struct spdk_fio_zone_cb_arg *cb_arg = arg;
	struct spdk_fio_target *target = cb_arg->target;
	int rc;

	rc = spdk_bdev_get_zone_info(target->desc, target->ch, cb_arg->offset_blocks,
				     cb_arg->nr_zones, cb_arg->spdk_zones,
				     spdk_fio_bdev_get_zone_info_done, cb_arg);
	if (rc < 0) {
		cb_arg->completed = rc;
	}
}

static int
spdk_fio_report_zones(struct thread_data *td, struct fio_file *f, uint64_t offset,
		      struct zbd_zone *zones, unsigned int nr_zones)
{
	struct spdk_fio_target *target;
	struct spdk_fio_thread *fio_thread;
	struct spdk_fio_zone_cb_arg cb_arg;
	uint32_t block_size;
	int rc;

	if (nr_zones == 0) {
		return 0;
	}

	/* spdk_fio_report_zones() is only called before the bdev I/O channels have been created.
	 * Since we need an I/O channel for report_zones(), call spdk_fio_init() to initialize
	 * the thread early.
	 * spdk_fio_report_zones() might be called several times by fio, if e.g. the zone report
	 * for all zones does not fit in the buffer that fio has allocated for the zone report.
	 * It is safe to call spdk_fio_init(), even if the thread has already been initialized.
	 */
	rc = spdk_fio_init(td);
	if (rc) {
		return rc;
	}
	fio_thread = td->io_ops_data;
	target = f->engine_data;

	assert(fio_thread);
	assert(target);

	block_size = spdk_bdev_get_block_size(target->bdev);

	cb_arg.target = target;
	cb_arg.completed = 0;
	cb_arg.offset_blocks = offset / block_size;
	cb_arg.fio_zones = zones;
	cb_arg.nr_zones = spdk_min(nr_zones, spdk_bdev_get_num_zones(target->bdev));

	cb_arg.spdk_zones = calloc(1, sizeof(*cb_arg.spdk_zones) * cb_arg.nr_zones);
	if (!cb_arg.spdk_zones) {
		SPDK_ERRLOG("Could not allocate memory for zone report!\n");
		rc = -ENOMEM;
		goto cleanup_thread;
	}

	spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_get_zone_info, &cb_arg);
	do {
		spdk_fio_poll_thread(fio_thread);
	} while (!cb_arg.completed);

	/* Free cb_arg.spdk_zones. The report in fio format is stored in cb_arg.fio_zones/zones. */
	free(cb_arg.spdk_zones);

	rc = cb_arg.completed;
	if (rc < 0) {
		SPDK_ERRLOG("Failed to get zone info: %d\n", rc);
		goto cleanup_thread;
	}

	/* Return the number of zones successfully copied. */
	return rc;

cleanup_thread:
	spdk_fio_cleanup(td);

	return rc;
}

static void
spdk_fio_bdev_zone_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct spdk_fio_zone_cb_arg *cb_arg = arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		cb_arg->completed = -EIO;
	} else {
		cb_arg->completed = 1;
	}
}

static void
spdk_fio_bdev_zone_reset(void *arg)
{
	struct spdk_fio_zone_cb_arg *cb_arg = arg;
	struct spdk_fio_target *target = cb_arg->target;
	int rc;

	rc = spdk_bdev_zone_management(target->desc, target->ch, cb_arg->offset_blocks,
				       SPDK_BDEV_ZONE_RESET,
				       spdk_fio_bdev_zone_reset_done, cb_arg);
	if (rc < 0) {
		cb_arg->completed = rc;
	}
}

static int
spdk_fio_reset_zones(struct spdk_fio_thread *fio_thread, struct spdk_fio_target *target,
		     uint64_t offset, uint64_t length)
{
	uint64_t zone_size_bytes;
	uint32_t block_size;
	int rc;

	assert(fio_thread);
	assert(target);

	block_size = spdk_bdev_get_block_size(target->bdev);
	zone_size_bytes = spdk_bdev_get_zone_size(target->bdev) * block_size;

	for (uint64_t cur = offset; cur < offset + length; cur += zone_size_bytes) {
		struct spdk_fio_zone_cb_arg cb_arg = {
			.target = target,
			.completed = 0,
			.offset_blocks = cur / block_size,
		};

		spdk_thread_send_msg(fio_thread->thread, spdk_fio_bdev_zone_reset, &cb_arg);
		do {
			spdk_fio_poll_thread(fio_thread);
		} while (!cb_arg.completed);

		rc = cb_arg.completed;
		if (rc < 0) {
			SPDK_ERRLOG("Failed to reset zone: %d\n", rc);
			return rc;
		}
	}

	return 0;
}

static int
spdk_fio_reset_wp(struct thread_data *td, struct fio_file *f, uint64_t offset, uint64_t length)
{
	return spdk_fio_reset_zones(td->io_ops_data, f->engine_data, offset, length);
}
#endif

#if FIO_IOOPS_VERSION >= 30
static int spdk_fio_get_max_open_zones(struct thread_data *td, struct fio_file *f,
				       unsigned int *max_open_zones)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(f->file_name);
	if (!bdev) {
		SPDK_ERRLOG("Cannot get max open zones, no bdev with name: %s\n", f->file_name);
		return -ENODEV;
	}

	*max_open_zones = spdk_bdev_get_max_open_zones(bdev);

	return 0;
}
#endif

static int
spdk_fio_handle_options(struct thread_data *td, struct fio_file *f, struct spdk_bdev *bdev)
{
	struct spdk_fio_options *fio_options = td->eo;

	if (fio_options->initial_zone_reset && spdk_bdev_is_zoned(bdev)) {
#if FIO_HAS_ZBD
		int rc = spdk_fio_init(td);
		if (rc) {
			return rc;
		}
		rc = spdk_fio_reset_zones(td->io_ops_data, f->engine_data, 0, f->real_file_size);
		if (rc) {
			spdk_fio_cleanup(td);
			return rc;
		}
#else
		SPDK_ERRLOG("fio version is too old to support zoned block devices\n");
#endif
	}

	return 0;
}

static int
spdk_fio_handle_options_per_target(struct thread_data *td, struct fio_file *f)
{
	struct spdk_fio_target *target = f->engine_data;
	struct spdk_fio_options *fio_options = td->eo;

	if (fio_options->zone_append && spdk_bdev_is_zoned(target->bdev)) {
		if (spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND)) {
			SPDK_DEBUGLOG(fio_bdev, "Using zone appends instead of writes on: '%s'\n",
				      f->file_name);
			target->zone_append_enabled = true;
		} else {
			SPDK_WARNLOG("Falling back to writes on: '%s' - bdev lacks zone append cmd\n",
				     f->file_name);
		}
	}

	return 0;
}
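
/*
 * Engine-specific job options exposed to fio job files. Each entry maps onto a
 * field of struct spdk_fio_options via its off1 offset.
 */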
static struct fio_option options[] = {
	{
		.name		= "spdk_conf",
		.lname		= "SPDK configuration file",
		.type		= FIO_OPT_STR_STORE,
		.off1		= offsetof(struct spdk_fio_options, conf),
		.help		= "A SPDK JSON configuration file",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "spdk_json_conf",
		.lname		= "SPDK JSON configuration file",
		.type		= FIO_OPT_STR_STORE,
		.off1		= offsetof(struct spdk_fio_options, json_conf),
		.help		= "A SPDK JSON configuration file",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "spdk_mem",
		.lname		= "SPDK memory in MB",
		.type		= FIO_OPT_INT,
		.off1		= offsetof(struct spdk_fio_options, mem_mb),
		.help		= "Amount of memory in MB to allocate for SPDK",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "spdk_single_seg",
		.lname		= "SPDK switch to create just a single hugetlbfs file",
		.type		= FIO_OPT_BOOL,
		.off1		= offsetof(struct spdk_fio_options, mem_single_seg),
		.help		= "If set to 1, SPDK will use just a single hugetlbfs file",
		.def		= "0",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "log_flags",
		.lname		= "log flags",
		.type		= FIO_OPT_STR_STORE,
		.off1		= offsetof(struct spdk_fio_options, log_flags),
		.help		= "SPDK log flags to enable",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "initial_zone_reset",
		.lname		= "Reset Zones on initialization",
		.type		= FIO_OPT_INT,
		.off1		= offsetof(struct spdk_fio_options, initial_zone_reset),
		.def		= "0",
		.help		= "Reset Zones on initialization (0=disable, 1=Reset All Zones)",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= "zone_append",
		.lname		= "Use zone append instead of write",
		.type		= FIO_OPT_INT,
		.off1		= offsetof(struct spdk_fio_options, zone_append),
		.def		= "0",
		.help		= "Use zone append instead of write (1=zone append, 0=write)",
		.category	= FIO_OPT_C_ENGINE,
		.group		= FIO_OPT_G_INVALID,
	},
	{
		.name		= NULL,
	},
};

/* FIO imports this structure using dlsym */
struct ioengine_ops ioengine = {
	.name			= "spdk_bdev",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_RAWIO | FIO_NOEXTEND | FIO_NODISKUTIL | FIO_MEMALIGN,
	.setup			= spdk_fio_setup,
	.init			= spdk_fio_init,
	/* .prep		= unused, */
	.queue			= spdk_fio_queue,
	/* .commit		= unused, */
	.getevents		= spdk_fio_getevents,
	.event			= spdk_fio_event,
	/* .errdetails		= unused, */
	/* .cancel		= unused, */
	.cleanup		= spdk_fio_cleanup,
	.open_file		= spdk_fio_open,
	.close_file		= spdk_fio_close,
	.invalidate		= spdk_fio_invalidate,
	/* .unlink_file		= unused, */
	/* .get_file_size	= unused, */
	/* .terminate		= unused, */
	.iomem_alloc		= spdk_fio_iomem_alloc,
	.iomem_free		= spdk_fio_iomem_free,
	.io_u_init		= spdk_fio_io_u_init,
	.io_u_free		= spdk_fio_io_u_free,
#if FIO_HAS_ZBD
	.get_zoned_model	= spdk_fio_get_zoned_model,
	.report_zones		= spdk_fio_report_zones,
	.reset_wp		= spdk_fio_reset_wp,
#endif
#if FIO_IOOPS_VERSION >= 30
	.get_max_open_zones	= spdk_fio_get_max_open_zones,
#endif
	.option_struct_size	= sizeof(struct spdk_fio_options),
	.options		= options,
};

static void fio_init spdk_fio_register(void)
{
	register_ioengine(&ioengine);
}

static void
spdk_fio_finish_env(void)
{
	pthread_mutex_lock(&g_init_mtx);
	g_poll_loop = false;
	pthread_cond_signal(&g_init_cond);
	pthread_mutex_unlock(&g_init_mtx);
	pthread_join(g_init_thread_id, NULL);

	spdk_thread_lib_fini();
}

static void fio_exit spdk_fio_unregister(void)
{
	if (g_spdk_env_initialized) {
		spdk_fio_finish_env();
		g_spdk_env_initialized = false;
	}
	unregister_ioengine(&ioengine);
}

SPDK_LOG_REGISTER_COMPONENT(fio_bdev)