2022-06-03 19:15:11 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
2022-11-01 20:26:26 +00:00
|
|
|
* Copyright (C) 2016 Intel Corporation. All rights reserved.
|
2021-05-14 12:54:02 +00:00
|
|
|
* Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
|
2023-03-21 18:53:58 +00:00
|
|
|
* Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2016-06-06 21:44:30 +00:00
|
|
|
*/
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2017-08-29 20:22:37 +00:00
|
|
|
#include "nvmf_internal.h"
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "transport.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2018-09-27 19:38:15 +00:00
|
|
|
#include "spdk/config.h"
|
2016-07-14 22:25:23 +00:00
|
|
|
#include "spdk/log.h"
|
2016-09-19 17:01:52 +00:00
|
|
|
#include "spdk/nvmf.h"
|
2020-01-30 20:34:06 +00:00
|
|
|
#include "spdk/nvmf_transport.h"
|
2016-07-14 22:25:23 +00:00
|
|
|
#include "spdk/queue.h"
|
2017-03-03 20:44:04 +00:00
|
|
|
#include "spdk/util.h"
|
2021-03-31 07:37:25 +00:00
|
|
|
#include "spdk_internal/usdt.h"
|
2016-07-01 20:18:24 +00:00
|
|
|
|
2019-12-10 20:12:55 +00:00
|
|
|
#define MAX_MEMPOOL_NAME_LENGTH 40
|
2020-08-20 05:54:03 +00:00
|
|
|
#define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000
|
2019-12-10 20:12:55 +00:00
|
|
|
|
|
|
|
struct nvmf_transport_ops_list_element {
|
|
|
|
struct spdk_nvmf_transport_ops ops;
|
|
|
|
TAILQ_ENTRY(nvmf_transport_ops_list_element) link;
|
2016-07-14 22:25:23 +00:00
|
|
|
};
|
2016-07-01 20:18:24 +00:00
|
|
|
|
2019-12-10 20:12:55 +00:00
|
|
|
TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
|
|
|
|
g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2018-08-22 23:04:16 +00:00
|
|
|
static inline const struct spdk_nvmf_transport_ops *
|
2020-05-10 05:53:39 +00:00
|
|
|
nvmf_get_transport_ops(const char *transport_name)
|
2018-08-22 23:04:16 +00:00
|
|
|
{
|
2019-12-10 20:12:55 +00:00
|
|
|
struct nvmf_transport_ops_list_element *ops;
|
|
|
|
TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
|
|
|
|
if (strcasecmp(transport_name, ops->ops.name) == 0) {
|
|
|
|
return &ops->ops;
|
2018-08-22 23:04:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-12-10 20:12:55 +00:00
|
|
|
void
|
|
|
|
spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
|
|
|
|
{
|
|
|
|
struct nvmf_transport_ops_list_element *new_ops;
|
|
|
|
|
2020-05-10 05:53:39 +00:00
|
|
|
if (nvmf_get_transport_ops(ops->name) != NULL) {
|
2019-12-10 20:12:55 +00:00
|
|
|
SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
|
|
|
|
assert(false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_ops = calloc(1, sizeof(*new_ops));
|
|
|
|
if (new_ops == NULL) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
|
|
|
|
assert(false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_ops->ops = *ops;
|
|
|
|
|
|
|
|
TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
|
|
|
|
}
|
|
|
|
|
2018-10-23 21:37:22 +00:00
|
|
|
const struct spdk_nvmf_transport_opts *
|
|
|
|
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return &transport->opts;
|
|
|
|
}
|
|
|
|
|
2021-08-31 13:44:05 +00:00
|
|
|
/* Serialize a transport's options as JSON.
 *
 * \param transport Transport whose opts are dumped.
 * \param w         JSON write context.
 * \param named     When true, wrap the output in a named "params" object
 *                  (RPC config-dump style); otherwise emit an anonymous object.
 */
void
nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			 bool named)
{
	const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);

	named ? spdk_json_write_named_object_begin(w, "params") : spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
	/* opts stores the total qpair count including the admin queue; the
	 * JSON field reports I/O queues only, hence the -1. */
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
	spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
	spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
	spdk_json_write_named_bool(w, "zcopy", opts->zcopy);

	/* Let the transport append any transport-specific options. */
	if (transport->ops->dump_opts) {
		transport->ops->dump_opts(transport, w);
	}

	spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
	spdk_json_write_object_end(w);
}
|
|
|
|
|
2021-09-01 08:28:10 +00:00
|
|
|
/* Serialize a listener's address (and any transport-specific listen
 * options) as a named "listen_address" JSON object. */
void
nvmf_transport_listen_dump_opts(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w)
{
	/* May be NULL for address families without a string form. */
	const char *adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

	spdk_json_write_named_object_begin(w, "listen_address");

	spdk_json_write_named_string(w, "trtype", trid->trstring);
	spdk_json_write_named_string(w, "adrfam", adrfam ? adrfam : "unknown");
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);

	/* Optional transport hook for extra listen options. */
	if (transport->ops->listen_dump_opts) {
		transport->ops->listen_dump_opts(transport, trid, w);
	}

	spdk_json_write_object_end(w);
}
|
|
|
|
|
2018-10-23 21:37:22 +00:00
|
|
|
spdk_nvme_transport_type_t
|
|
|
|
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return transport->ops->type;
|
|
|
|
}
|
|
|
|
|
2020-01-17 11:20:29 +00:00
|
|
|
const char *
|
|
|
|
spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return transport->ops->name;
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
static void
|
|
|
|
nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
|
|
|
|
struct spdk_nvmf_transport_opts *opts_src,
|
|
|
|
size_t opts_size)
|
2020-11-26 16:48:10 +00:00
|
|
|
{
|
|
|
|
assert(opts);
|
|
|
|
assert(opts_src);
|
|
|
|
|
|
|
|
opts->opts_size = opts_size;
|
|
|
|
|
|
|
|
#define SET_FIELD(field) \
|
|
|
|
if (offsetof(struct spdk_nvmf_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
|
|
|
|
opts->field = opts_src->field; \
|
|
|
|
} \
|
|
|
|
|
|
|
|
SET_FIELD(max_queue_depth);
|
|
|
|
SET_FIELD(max_qpairs_per_ctrlr);
|
|
|
|
SET_FIELD(in_capsule_data_size);
|
|
|
|
SET_FIELD(max_io_size);
|
|
|
|
SET_FIELD(io_unit_size);
|
|
|
|
SET_FIELD(max_aq_depth);
|
|
|
|
SET_FIELD(buf_cache_size);
|
|
|
|
SET_FIELD(num_shared_buffers);
|
|
|
|
SET_FIELD(dif_insert_or_strip);
|
|
|
|
SET_FIELD(abort_timeout_sec);
|
|
|
|
SET_FIELD(association_timeout);
|
|
|
|
SET_FIELD(transport_specific);
|
2021-11-25 22:08:12 +00:00
|
|
|
SET_FIELD(acceptor_poll_rate);
|
2021-11-24 14:42:24 +00:00
|
|
|
SET_FIELD(zcopy);
|
2020-11-26 16:48:10 +00:00
|
|
|
|
|
|
|
/* Do not remove this statement, you should always update this statement when you adding a new field,
|
|
|
|
* and do not forget to add the SET_FIELD statement for your added field. */
|
2021-11-25 22:08:12 +00:00
|
|
|
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 64, "Incorrect size");
|
2020-11-26 16:48:10 +00:00
|
|
|
|
|
|
|
#undef SET_FIELD
|
|
|
|
#undef FILED_CHECK
|
|
|
|
}
|
|
|
|
|
2023-01-25 10:20:12 +00:00
|
|
|
/* State carried through an (a)synchronous transport create operation,
 * freed by nvmf_transport_create_async_done(). */
struct nvmf_transport_create_ctx {
	/* Ops table of the transport being created. */
	const struct spdk_nvmf_transport_ops *ops;
	/* Validated/normalized copy of the caller's options. */
	struct spdk_nvmf_transport_opts opts;
	/* Opaque argument passed back to cb_fn. */
	void *cb_arg;
	/* Completion callback; receives the new transport or NULL on failure. */
	spdk_nvmf_transport_create_done_cb cb_fn;
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *transport)
|
2016-07-14 22:25:23 +00:00
|
|
|
{
|
2023-01-25 10:20:12 +00:00
|
|
|
struct nvmf_transport_create_ctx *ctx = cb_arg;
|
2019-01-14 19:55:06 +00:00
|
|
|
char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
|
|
|
|
int chars_written;
|
2023-01-25 10:20:12 +00:00
|
|
|
|
|
|
|
if (!transport) {
|
|
|
|
SPDK_ERRLOG("Failed to create transport.\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_init(&transport->mutex, NULL);
|
|
|
|
TAILQ_INIT(&transport->listeners);
|
|
|
|
transport->ops = ctx->ops;
|
|
|
|
transport->opts = ctx->opts;
|
|
|
|
chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
|
|
|
|
transport->ops->name, "data");
|
|
|
|
if (chars_written < 0) {
|
|
|
|
SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ctx->opts.num_shared_buffers) {
|
2023-02-22 11:39:57 +00:00
|
|
|
transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name, ctx->opts.num_shared_buffers,
|
|
|
|
ctx->opts.io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT, 0, SPDK_ENV_SOCKET_ID_ANY);
|
2023-01-25 10:20:12 +00:00
|
|
|
if (!transport->data_buf_pool) {
|
|
|
|
if (spdk_mempool_lookup(spdk_mempool_name) != NULL) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate poll group buffer pull: already exists\n");
|
|
|
|
SPDK_ERRLOG("Probably running in multiprocess environment, which is "
|
|
|
|
"unsupported by the nvmf library\n");
|
|
|
|
} else {
|
|
|
|
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
|
|
|
|
}
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->cb_fn(ctx->cb_arg, transport);
|
|
|
|
free(ctx);
|
|
|
|
return;
|
|
|
|
|
|
|
|
err:
|
|
|
|
if (transport) {
|
|
|
|
transport->ops->destroy(transport, NULL, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->cb_fn(ctx->cb_arg, NULL);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_transport_create_done(void *ctx)
|
|
|
|
{
|
|
|
|
struct nvmf_transport_create_ctx *_ctx = (struct nvmf_transport_create_ctx *)ctx;
|
|
|
|
|
|
|
|
nvmf_transport_create_async_done(_ctx, _ctx->ops->create(&_ctx->opts));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Common implementation behind spdk_nvmf_transport_create() and
 * spdk_nvmf_transport_create_async().
 *
 * Validates the caller's options, normalizes them (min admin queue depth),
 * and dispatches to the transport's create or create_async callback.
 *
 * \param sync When true and the transport has a synchronous create op,
 *             the completion callback fires before this function returns.
 * \return 0 on successful dispatch (cb_fn reports the final result),
 *         -ENOMEM on context allocation failure, -1 on validation failure.
 */
static int
nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
		      spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg, bool sync)
{
	struct nvmf_transport_create_ctx *ctx;
	int rc;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		goto err;
	}

	/* opts_size drives the ABI-compatible field copy below; zero would
	 * silently copy nothing. */
	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
		goto err;
	}

	ctx->ops = nvmf_get_transport_ops(transport_name);
	if (!ctx->ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		goto err;
	}

	nvmf_transport_opts_copy(&ctx->opts, opts, opts->opts_size);
	/* max_io_size of 0 means "transport default"; any explicit value must
	 * be a power of two and at least 8 KiB. */
	if (ctx->opts.max_io_size != 0 && (!spdk_u32_is_pow2(ctx->opts.max_io_size) ||
					   ctx->opts.max_io_size < 8192)) {
		SPDK_ERRLOG("max_io_size %u must be a power of 2 and be greater than or equal 8KB\n",
			    ctx->opts.max_io_size);
		goto err;
	}

	/* Clamp up rather than fail: the NVMe-oF spec mandates a minimum
	 * admin submission queue size. */
	if (ctx->opts.max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
		SPDK_ERRLOG("max_aq_depth %u is less than minimum defined by NVMf spec, use min value\n",
			    ctx->opts.max_aq_depth);
		ctx->opts.max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
	}

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Prioritize sync create operation. */
	if (ctx->ops->create) {
		if (sync) {
			_nvmf_transport_create_done(ctx);
			return 0;
		}

		/* Async API requested but transport only has a sync create:
		 * defer it to this thread's message queue so the callback is
		 * still invoked asynchronously. */
		rc = spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx);
		if (rc) {
			goto err;
		}

		return 0;
	}

	assert(ctx->ops->create_async);
	rc = ctx->ops->create_async(&ctx->opts, nvmf_transport_create_async_done, ctx);
	if (rc) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		goto err;
	}

	return 0;
err:
	free(ctx);
	return -1;
}
|
2020-02-15 06:19:24 +00:00
|
|
|
|
2023-01-25 10:20:12 +00:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_create_async(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
|
|
|
|
spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg)
|
|
|
|
{
|
|
|
|
return nvmf_transport_create(transport_name, opts, cb_fn, cb_arg, false);
|
|
|
|
}
|
2020-11-26 16:48:10 +00:00
|
|
|
|
2023-01-25 10:20:12 +00:00
|
|
|
/* Completion callback used by the synchronous create path: stash the new
 * transport pointer (or NULL on failure) into the caller's out-variable. */
static void
nvmf_transport_create_sync_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport **out = cb_arg;

	*out = transport;
}
|
2021-06-23 09:04:05 +00:00
|
|
|
|
2023-01-25 10:20:12 +00:00
|
|
|
/* Public synchronous transport creation entry point.
 *
 * \return The new transport, or NULL on failure. Only valid for transports
 *         that implement the synchronous create callback (asserted below).
 */
struct spdk_nvmf_transport *
spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
{
	struct spdk_nvmf_transport *transport = NULL;

	/* Current implementation supports synchronous version of create operation only. */
	assert(nvmf_get_transport_ops(transport_name) && nvmf_get_transport_ops(transport_name)->create);

	/* sync=true guarantees the callback ran before this returns, so
	 * `transport` is fully populated (or still NULL on failure) here. */
	nvmf_transport_create(transport_name, opts, nvmf_transport_create_sync_done, &transport, true);
	return transport;
}
|
|
|
|
|
2018-10-23 21:37:22 +00:00
|
|
|
struct spdk_nvmf_transport *
|
|
|
|
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
|
|
|
|
{
|
|
|
|
return TAILQ_FIRST(&tgt->transports);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct spdk_nvmf_transport *
|
|
|
|
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return TAILQ_NEXT(transport, link);
|
|
|
|
}
|
|
|
|
|
2017-07-24 23:30:07 +00:00
|
|
|
/* Destroy a transport: release the shared data buffer pool, stop and free
 * all listeners, destroy the mutex, then delegate to the transport's own
 * destroy callback (which may complete asynchronously via cb_fn).
 *
 * \return The transport's destroy callback return value.
 */
int
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
			    spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	struct spdk_nvmf_listener *listener, *listener_tmp;

	if (transport->data_buf_pool != NULL) {
		/* All buffers should have been returned by now; a mismatch
		 * indicates leaked I/O buffers. Log it, but free the pool anyway. */
		if (spdk_mempool_count(transport->data_buf_pool) !=
		    transport->opts.num_shared_buffers) {
			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
				    spdk_mempool_count(transport->data_buf_pool),
				    transport->opts.num_shared_buffers);
		}
		spdk_mempool_free(transport->data_buf_pool);
	}

	/* Stop and free every remaining listener regardless of refcount. */
	TAILQ_FOREACH_SAFE(listener, &transport->listeners, link, listener_tmp) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		transport->ops->stop_listen(transport, &listener->trid);
		free(listener);
	}

	pthread_mutex_destroy(&transport->mutex);
	return transport->ops->destroy(transport, cb_fn, cb_arg);
}
|
|
|
|
|
2020-02-15 06:19:24 +00:00
|
|
|
struct spdk_nvmf_listener *
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
|
|
|
|
const struct spdk_nvme_transport_id *trid)
|
2020-02-15 06:19:24 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_listener *listener;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(listener, &transport->listeners, link) {
|
|
|
|
if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
|
|
|
|
return listener;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-07-28 17:40:40 +00:00
|
|
|
/* Start (or reference) listening on the given transport id.
 *
 * Listeners are refcounted: if a listener for trid already exists its
 * refcount is bumped and no new transport-level listen occurs. Otherwise a
 * new listener is created and the transport's listen callback is invoked
 * under the transport mutex; on failure the listener is rolled back.
 *
 * \return 0 on success, -ENOMEM on allocation failure, or the transport
 *         listen callback's error code.
 */
int
spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
			   const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_listener *listener;
	int rc;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		listener = calloc(1, sizeof(*listener));
		if (!listener) {
			return -ENOMEM;
		}

		listener->ref = 1;
		listener->trid = *trid;
		/* Inserted before the listen call; removed again below on failure. */
		TAILQ_INSERT_TAIL(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		rc = transport->ops->listen(transport, &listener->trid, opts);
		pthread_mutex_unlock(&transport->mutex);
		if (rc != 0) {
			TAILQ_REMOVE(&transport->listeners, listener, link);
			free(listener);
		}
		return rc;
	}

	/* Already listening on this trid; just take another reference. */
	++listener->ref;

	return 0;
}
|
2017-07-25 20:47:41 +00:00
|
|
|
|
|
|
|
/* Drop one reference on the listener matching trid; when the refcount
 * reaches zero, actually stop listening (under the transport mutex) and
 * free the listener.
 *
 * \return 0 on success, -ENOENT if no listener matches trid.
 */
int
spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		return -ENOENT;
	}

	if (--listener->ref == 0) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		transport->ops->stop_listen(transport, trid);
		pthread_mutex_unlock(&transport->mutex);
		free(listener);
	}

	return 0;
}
|
|
|
|
|
2020-07-09 21:57:37 +00:00
|
|
|
/* State for the asynchronous stop-listen operation
 * (spdk_nvmf_transport_stop_listen_async); freed in nvmf_stop_listen_fini(). */
struct nvmf_stop_listen_ctx {
	/* Transport whose listener is being stopped. */
	struct spdk_nvmf_transport *transport;
	/* Listen address to stop; subnqn must be empty (subsystem is matched
	 * by pointer, not by NQN string). */
	struct spdk_nvme_transport_id trid;
	/* Optional subsystem filter; NULL disconnects all qpairs on trid. */
	struct spdk_nvmf_subsystem *subsystem;
	/* Optional completion callback and its argument. */
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};
|
|
|
|
|
|
|
|
/* Final step of the async stop-listen: runs after every poll group has
 * disconnected its matching qpairs. Stops the listener itself, invokes
 * the user's completion callback, and frees the context. */
static void
nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_transport *transport;
	/* NOTE(review): rc is initialized from the channel-iteration status but
	 * immediately overwritten below, so the iteration status is effectively
	 * ignored — confirm this is intentional. */
	int rc = status;

	ctx = spdk_io_channel_iter_get_ctx(i);
	transport = ctx->transport;
	assert(transport != NULL);

	rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
	if (rc) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
	}

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, rc);
	}
	free(ctx);
}
|
|
|
|
|
2023-03-21 18:53:58 +00:00
|
|
|
static void nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i);

/* Thread-message trampoline: re-enter the qpair-disconnect scan on this
 * poll group. Used to poll until all matching qpairs have finished
 * disconnecting before the channel iteration continues. */
static void
nvmf_stop_listen_disconnect_qpairs_msg(void *ctx)
{
	nvmf_stop_listen_disconnect_qpairs((struct spdk_io_channel_iter *)ctx);
}
|
|
|
|
|
2020-07-09 21:57:37 +00:00
|
|
|
/* Per-poll-group step of the async stop-listen: disconnect every qpair that
 * was accepted on the target listen trid (optionally filtered by subsystem).
 * If any matching qpair was found, re-schedule this scan on the same thread
 * and re-check until none remain (disconnect completes asynchronously);
 * only then advance the channel iteration. */
static void
nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvme_transport_id tmp_trid;
	bool qpair_found = false;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		/* Skip qpairs whose listen trid cannot be determined. */
		if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
			continue;
		}

		/* Skip qpairs that don't match the listen trid and subsystem pointer. If
		 * the ctx->subsystem is NULL, it means disconnect all qpairs that match
		 * the listen trid. */
		if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
			if (ctx->subsystem == NULL ||
			    (qpair->ctrlr != NULL && ctx->subsystem == qpair->ctrlr->subsys)) {
				spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
				qpair_found = true;
			}
		}
	}
	/* Matching qpairs are still draining; poll again via a thread message
	 * instead of continuing the iteration now. */
	if (qpair_found) {
		spdk_thread_send_msg(spdk_get_thread(), nvmf_stop_listen_disconnect_qpairs_msg, i);
		return;
	}

	spdk_for_each_channel_continue(i, 0);
}
|
|
|
|
|
|
|
|
/* Asynchronously stop listening on trid: first disconnect, on every poll
 * group, all qpairs accepted on that trid (optionally only those belonging
 * to the given subsystem), then stop the listener and invoke cb_fn.
 *
 * \param subsystem NULL to disconnect all qpairs matching trid; otherwise
 *                  only qpairs whose controller belongs to this subsystem.
 * \return 0 on successful start, -EINVAL if trid carries a subnqn
 *         (matching is by subsystem pointer, not NQN), -ENOMEM on
 *         allocation failure.
 */
int
spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
				      const struct spdk_nvme_transport_id *trid,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				      void *cb_arg)
{
	struct nvmf_stop_listen_ctx *ctx;

	if (trid->subnqn[0] != '\0') {
		SPDK_ERRLOG("subnqn should be empty, use subsystem pointer instead\n");
		return -EINVAL;
	}

	ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->trid = *trid;
	ctx->subsystem = subsystem;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Scan each poll group for matching qpairs, then finish in
	 * nvmf_stop_listen_fini (which frees ctx). */
	spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
			      nvmf_stop_listen_fini);

	return 0;
}
|
|
|
|
|
2017-07-25 20:47:41 +00:00
|
|
|
void
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvme_transport_id *trid,
|
|
|
|
struct spdk_nvmf_discovery_log_page_entry *entry)
|
2017-07-25 20:47:41 +00:00
|
|
|
{
|
2017-08-23 17:23:44 +00:00
|
|
|
transport->ops->listener_discover(transport, trid, entry);
|
2017-07-25 20:47:41 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 23:24:33 +00:00
|
|
|
/* Create a transport-level poll group and pre-populate its per-group
 * buffer cache from the transport's shared data buffer pool.
 *
 * \return The new transport poll group, or NULL if the transport's
 *         poll_group_create callback fails. Cache-fill failures are not
 *         fatal: the group is returned with a reduced (possibly zero) cache.
 */
struct spdk_nvmf_transport_poll_group *
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
				 struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_nvmf_transport_pg_cache_buf **bufs;
	uint32_t i;

	pthread_mutex_lock(&transport->mutex);
	tgroup = transport->ops->poll_group_create(transport, group);
	pthread_mutex_unlock(&transport->mutex);
	if (!tgroup) {
		return NULL;
	}
	tgroup->transport = transport;

	STAILQ_INIT(&tgroup->pending_buf_queue);
	STAILQ_INIT(&tgroup->buf_cache);

	if (transport->opts.buf_cache_size == 0) {
		/* We aren't going to allocate any buffers for the cache, so just return now. */
		return tgroup;
	}

	tgroup->buf_cache_size = transport->opts.buf_cache_size;
	/* buf_cache_size of UINT32_MAX means the value should be calculated dynamically
	 * based on the number of buffers in the shared pool and the number of poll groups
	 * that are sharing them. We allocate 75% of the pool for the cache, and then
	 * divide that by number of poll groups to determine the buf_cache_size for this
	 * poll group.
	 */
	if (tgroup->buf_cache_size == UINT32_MAX) {
		uint32_t num_shared_buffers = transport->opts.num_shared_buffers;
		/* Theoretically the nvmf library can dynamically add poll groups to
		 * the target, after transports have already been created. We aren't
		 * going to try to really handle this case efficiently, just do enough
		 * here to ensure we don't divide-by-zero.
		 */
		uint16_t num_poll_groups = group->tgt->num_poll_groups ? : spdk_env_get_core_count();

		tgroup->buf_cache_size = (num_shared_buffers * 3 / 4) / num_poll_groups;
	}

	/* Temporary array used only to receive the bulk-dequeued buffers. */
	bufs = calloc(tgroup->buf_cache_size, sizeof(struct spdk_nvmf_transport_pg_cache_buf *));

	if (!bufs) {
		SPDK_ERRLOG("Memory allocation failed, can't reserve buffers for the pg buffer cache\n");
		return tgroup;
	}

	if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
		/* Bulk get is all-or-nothing; shrink the request to what is left
		 * in the pool and retry once. */
		tgroup->buf_cache_size = (uint32_t)spdk_mempool_count(transport->data_buf_pool);
		SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache. "
			       "Decrease the number of cached buffers from %u to %u\n",
			       transport->opts.buf_cache_size, tgroup->buf_cache_size);
		/* Sanity check */
		assert(tgroup->buf_cache_size <= transport->opts.buf_cache_size);
		/* Try again with less number of buffers */
		if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
			SPDK_NOTICELOG("Failed to reserve %u buffers\n", tgroup->buf_cache_size);
			tgroup->buf_cache_size = 0;
		}
	}

	for (i = 0; i < tgroup->buf_cache_size; i++) {
		STAILQ_INSERT_HEAD(&tgroup->buf_cache, bufs[i], link);
	}
	tgroup->buf_cache_count = tgroup->buf_cache_size;

	free(bufs);

	return tgroup;
}
|
|
|
|
|
2019-05-15 13:53:39 +00:00
|
|
|
struct spdk_nvmf_transport_poll_group *
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvmf_qpair *qpair)
|
2019-05-15 13:53:39 +00:00
|
|
|
{
|
2022-07-13 13:24:22 +00:00
|
|
|
struct spdk_nvmf_transport_poll_group *tgroup;
|
|
|
|
|
2019-05-15 13:53:39 +00:00
|
|
|
if (transport->ops->get_optimal_poll_group) {
|
2022-07-13 13:24:22 +00:00
|
|
|
pthread_mutex_lock(&transport->mutex);
|
|
|
|
tgroup = transport->ops->get_optimal_poll_group(qpair);
|
|
|
|
pthread_mutex_unlock(&transport->mutex);
|
|
|
|
|
|
|
|
return tgroup;
|
2019-05-15 13:53:39 +00:00
|
|
|
} else {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:47:41 +00:00
|
|
|
/* Destroy a transport poll group: return every cached buffer to the shared
 * pool, then delegate to the transport's poll_group_destroy callback under
 * the transport mutex. Pending I/O buffers are expected to be gone by now. */
void
nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;
	struct spdk_nvmf_transport *transport;

	transport = group->transport;

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}

	/* Drain the per-group buffer cache back into the shared pool. */
	STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
		STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
		spdk_mempool_put(transport->data_buf_pool, buf);
	}

	pthread_mutex_lock(&transport->mutex);
	transport->ops->poll_group_destroy(group);
	pthread_mutex_unlock(&transport->mutex);
}
|
|
|
|
|
|
|
|
/* Add a qpair to a transport poll group.
 *
 * Binds the qpair to the group's transport on first add; rejects a qpair
 * already bound to a different transport (asserts in debug builds).
 *
 * \return Transport callback result, or -1 on transport mismatch.
 */
int
nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
{
	if (qpair->transport) {
		assert(qpair->transport == group->transport);
		if (qpair->transport != group->transport) {
			return -1;
		}
	} else {
		qpair->transport = group->transport;
	}

	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_add, qpair, qpair->qid,
			   spdk_thread_get_id(group->group->thread));

	return group->transport->ops->poll_group_add(group, qpair);
}
|
|
|
|
|
2018-11-21 02:11:25 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_qpair *qpair)
|
2018-11-21 02:11:25 +00:00
|
|
|
{
|
|
|
|
int rc = ENOTSUP;
|
|
|
|
|
2021-11-15 11:25:38 +00:00
|
|
|
SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_remove, qpair, qpair->qid,
|
|
|
|
spdk_thread_get_id(group->group->thread));
|
|
|
|
|
2018-11-21 02:11:25 +00:00
|
|
|
assert(qpair->transport == group->transport);
|
|
|
|
if (group->transport->ops->poll_group_remove) {
|
|
|
|
rc = group->transport->ops->poll_group_remove(group, qpair);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-08-28 20:48:39 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
|
2017-08-28 20:48:39 +00:00
|
|
|
{
|
|
|
|
return group->transport->ops->poll_group_poll(group);
|
|
|
|
}
|
|
|
|
|
2018-07-18 15:47:16 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_req_free(struct spdk_nvmf_request *req)
|
2018-07-18 15:47:16 +00:00
|
|
|
{
|
|
|
|
return req->qpair->transport->ops->req_free(req);
|
|
|
|
}
|
|
|
|
|
2017-07-25 20:47:41 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_req_complete(struct spdk_nvmf_request *req)
|
2017-07-25 20:47:41 +00:00
|
|
|
{
|
|
|
|
return req->qpair->transport->ops->req_complete(req);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-11-13 07:23:42 +00:00
|
|
|
nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair,
|
|
|
|
spdk_nvmf_transport_qpair_fini_cb cb_fn,
|
|
|
|
void *cb_arg)
|
2017-07-25 20:47:41 +00:00
|
|
|
{
|
2021-03-31 07:37:25 +00:00
|
|
|
SPDK_DTRACE_PROBE1(nvmf_transport_qpair_fini, qpair);
|
|
|
|
|
2020-11-13 07:23:42 +00:00
|
|
|
qpair->transport->ops->qpair_fini(qpair, cb_fn, cb_arg);
|
2017-07-25 20:47:41 +00:00
|
|
|
}
|
|
|
|
|
2018-08-02 22:08:12 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
2018-08-02 22:08:12 +00:00
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
|
|
|
|
}
|
2018-09-07 20:41:41 +00:00
|
|
|
|
2018-09-10 21:28:04 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
2018-09-10 21:28:04 +00:00
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
|
|
|
|
}
|
|
|
|
|
2018-09-07 20:41:41 +00:00
|
|
|
int
|
2020-05-15 01:14:26 +00:00
|
|
|
nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
2018-09-07 20:41:41 +00:00
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
|
|
|
|
}
|
2018-08-27 22:27:47 +00:00
|
|
|
|
2020-06-21 15:28:27 +00:00
|
|
|
void
|
|
|
|
nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvmf_request *req)
|
|
|
|
{
|
2021-04-09 09:00:50 +00:00
|
|
|
if (qpair->transport->ops->qpair_abort_request) {
|
|
|
|
qpair->transport->ops->qpair_abort_request(qpair, req);
|
|
|
|
}
|
2020-06-21 15:28:27 +00:00
|
|
|
}
|
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
bool
|
2019-12-23 23:27:35 +00:00
|
|
|
spdk_nvmf_transport_opts_init(const char *transport_name,
|
2020-11-26 16:48:10 +00:00
|
|
|
struct spdk_nvmf_transport_opts *opts, size_t opts_size)
|
2018-08-27 22:27:47 +00:00
|
|
|
{
|
|
|
|
const struct spdk_nvmf_transport_ops *ops;
|
2020-11-26 16:48:10 +00:00
|
|
|
struct spdk_nvmf_transport_opts opts_local = {};
|
2018-08-27 22:27:47 +00:00
|
|
|
|
2020-05-10 05:53:39 +00:00
|
|
|
ops = nvmf_get_transport_ops(transport_name);
|
2018-08-27 22:27:47 +00:00
|
|
|
if (!ops) {
|
2019-12-23 23:27:35 +00:00
|
|
|
SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
|
2018-08-27 22:27:47 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-11-26 16:48:10 +00:00
|
|
|
if (!opts) {
|
|
|
|
SPDK_ERRLOG("opts should not be NULL\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!opts_size) {
|
|
|
|
SPDK_ERRLOG("opts_size inside opts should not be zero value\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
|
2021-11-25 22:08:12 +00:00
|
|
|
opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
|
2020-11-26 16:48:10 +00:00
|
|
|
ops->opts_init(&opts_local);
|
|
|
|
|
|
|
|
nvmf_transport_opts_copy(opts, &opts_local, opts_size);
|
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
return true;
|
|
|
|
}
|
2018-08-02 02:21:45 +00:00
|
|
|
|
2019-08-21 05:36:13 +00:00
|
|
|
void
|
|
|
|
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
2019-09-24 01:01:53 +00:00
|
|
|
struct spdk_nvmf_transport *transport)
|
2019-08-21 05:36:13 +00:00
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
2019-09-25 23:43:31 +00:00
|
|
|
for (i = 0; i < req->iovcnt; i++) {
|
2019-08-21 05:36:13 +00:00
|
|
|
if (group->buf_cache_count < group->buf_cache_size) {
|
|
|
|
STAILQ_INSERT_HEAD(&group->buf_cache,
|
|
|
|
(struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
|
|
|
|
link);
|
|
|
|
group->buf_cache_count++;
|
|
|
|
} else {
|
|
|
|
spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
|
|
|
|
}
|
|
|
|
req->iov[i].iov_base = NULL;
|
|
|
|
req->buffers[i] = NULL;
|
|
|
|
req->iov[i].iov_len = 0;
|
|
|
|
}
|
2023-01-12 16:43:16 +00:00
|
|
|
req->iovcnt = 0;
|
2019-08-21 05:36:13 +00:00
|
|
|
req->data_from_pool = false;
|
|
|
|
}
|
|
|
|
|
2022-03-09 11:44:03 +00:00
|
|
|
/* Callback used by nvmf_request_get_buffers() to attach one pool buffer to a
 * request. Returns the number of bytes of the I/O still left to place after
 * consuming this buffer (0 when done).
 */
typedef int (*set_buffer_callback)(struct spdk_nvmf_request *req, void *buf,
				   uint32_t length, uint32_t io_unit_size);
|
|
|
|
static int
|
2019-09-25 23:43:31 +00:00
|
|
|
nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
|
|
|
|
uint32_t io_unit_size)
|
|
|
|
{
|
|
|
|
req->buffers[req->iovcnt] = buf;
|
|
|
|
req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
|
|
|
|
~NVMF_DATA_BUFFER_MASK);
|
|
|
|
req->iov[req->iovcnt].iov_len = spdk_min(length, io_unit_size);
|
|
|
|
length -= req->iov[req->iovcnt].iov_len;
|
|
|
|
req->iovcnt++;
|
|
|
|
|
|
|
|
return length;
|
|
|
|
}
|
|
|
|
|
2019-09-24 04:21:13 +00:00
|
|
|
static int
|
|
|
|
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
2022-03-09 11:44:03 +00:00
|
|
|
uint32_t length, uint32_t io_unit_size,
|
|
|
|
set_buffer_callback cb_func)
|
2019-08-21 05:36:13 +00:00
|
|
|
{
|
2019-09-24 00:53:14 +00:00
|
|
|
uint32_t num_buffers;
|
2019-09-25 23:43:31 +00:00
|
|
|
uint32_t i = 0, j;
|
|
|
|
void *buffer, *buffers[NVMF_REQ_MAX_BUFFERS];
|
2019-08-21 05:36:13 +00:00
|
|
|
|
2019-09-20 05:19:28 +00:00
|
|
|
/* If the number of buffers is too large, then we know the I/O is larger than allowed.
|
|
|
|
* Fail it.
|
|
|
|
*/
|
2019-09-25 23:43:31 +00:00
|
|
|
num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
|
2021-11-02 15:31:12 +00:00
|
|
|
if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
|
2019-09-20 05:19:28 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-08-21 05:36:13 +00:00
|
|
|
while (i < num_buffers) {
|
|
|
|
if (!(STAILQ_EMPTY(&group->buf_cache))) {
|
|
|
|
group->buf_cache_count--;
|
2019-09-25 23:43:31 +00:00
|
|
|
buffer = STAILQ_FIRST(&group->buf_cache);
|
2019-08-21 05:36:13 +00:00
|
|
|
STAILQ_REMOVE_HEAD(&group->buf_cache, link);
|
2019-09-25 23:43:31 +00:00
|
|
|
assert(buffer != NULL);
|
|
|
|
|
2022-03-09 11:44:03 +00:00
|
|
|
length = cb_func(req, buffer, length, io_unit_size);
|
2019-08-21 05:36:13 +00:00
|
|
|
i++;
|
|
|
|
} else {
|
2019-09-25 23:43:31 +00:00
|
|
|
if (spdk_mempool_get_bulk(transport->data_buf_pool, buffers,
|
2019-08-21 05:36:13 +00:00
|
|
|
num_buffers - i)) {
|
2019-09-24 04:21:13 +00:00
|
|
|
return -ENOMEM;
|
2019-08-21 05:36:13 +00:00
|
|
|
}
|
2019-09-25 23:43:31 +00:00
|
|
|
for (j = 0; j < num_buffers - i; j++) {
|
2022-03-09 11:44:03 +00:00
|
|
|
length = cb_func(req, buffers[j], length, io_unit_size);
|
2019-09-25 23:43:31 +00:00
|
|
|
}
|
2019-08-21 05:36:13 +00:00
|
|
|
i += num_buffers - i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-25 23:43:31 +00:00
|
|
|
assert(length == 0);
|
2019-09-26 09:37:19 +00:00
|
|
|
|
2019-08-21 05:36:13 +00:00
|
|
|
return 0;
|
2019-09-24 04:21:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
uint32_t length)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
req->iovcnt = 0;
|
2022-03-09 11:44:03 +00:00
|
|
|
rc = nvmf_request_get_buffers(req, group, transport, length,
|
|
|
|
transport->opts.io_unit_size,
|
|
|
|
nvmf_request_set_buffer);
|
|
|
|
if (!rc) {
|
|
|
|
req->data_from_pool = true;
|
|
|
|
} else if (rc == -ENOMEM) {
|
2019-09-24 04:21:13 +00:00
|
|
|
spdk_nvmf_request_free_buffers(req, group, transport);
|
2022-03-09 11:44:03 +00:00
|
|
|
return rc;
|
2019-09-24 04:21:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
2022-03-09 11:44:03 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
nvmf_request_set_stripped_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
|
|
|
|
uint32_t io_unit_size)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_stripped_data *data = req->stripped_data;
|
|
|
|
|
|
|
|
data->buffers[data->iovcnt] = buf;
|
|
|
|
data->iov[data->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
|
|
|
|
~NVMF_DATA_BUFFER_MASK);
|
|
|
|
data->iov[data->iovcnt].iov_len = spdk_min(length, io_unit_size);
|
|
|
|
length -= data->iov[data->iovcnt].iov_len;
|
|
|
|
data->iovcnt++;
|
|
|
|
|
|
|
|
return length;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nvmf_request_free_stripped_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_stripped_data *data = req->stripped_data;
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < data->iovcnt; i++) {
|
|
|
|
if (group->buf_cache_count < group->buf_cache_size) {
|
|
|
|
STAILQ_INSERT_HEAD(&group->buf_cache,
|
|
|
|
(struct spdk_nvmf_transport_pg_cache_buf *)data->buffers[i],
|
|
|
|
link);
|
|
|
|
group->buf_cache_count++;
|
|
|
|
} else {
|
|
|
|
spdk_mempool_put(transport->data_buf_pool, data->buffers[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
free(data);
|
|
|
|
req->stripped_data = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
nvmf_request_get_stripped_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
uint32_t length)
|
|
|
|
{
|
|
|
|
uint32_t block_size = req->dif.dif_ctx.block_size;
|
|
|
|
uint32_t data_block_size = block_size - req->dif.dif_ctx.md_size;
|
|
|
|
uint32_t io_unit_size = transport->opts.io_unit_size / block_size * data_block_size;
|
|
|
|
struct spdk_nvmf_stripped_data *data;
|
|
|
|
uint32_t i;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* Data blocks must be block aligned */
|
|
|
|
for (i = 0; i < req->iovcnt; i++) {
|
|
|
|
if (req->iov[i].iov_len % block_size) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
data = calloc(1, sizeof(*data));
|
|
|
|
if (data == NULL) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate memory for stripped_data.\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
req->stripped_data = data;
|
|
|
|
req->stripped_data->iovcnt = 0;
|
|
|
|
|
|
|
|
rc = nvmf_request_get_buffers(req, group, transport, length, io_unit_size,
|
|
|
|
nvmf_request_set_stripped_buffer);
|
|
|
|
if (rc == -ENOMEM) {
|
|
|
|
nvmf_request_free_stripped_buffers(req, group, transport);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|