2022-06-03 19:15:11 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
2022-11-01 20:26:26 +00:00
|
|
|
* Copyright (C) 2016 Intel Corporation. All rights reserved.
|
2021-02-26 15:27:23 +00:00
|
|
|
* Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
|
2023-03-13 08:10:32 +00:00
|
|
|
* Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2016-06-06 21:44:30 +00:00
|
|
|
*/
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2017-11-20 16:50:10 +00:00
|
|
|
#include "spdk/bdev.h"
|
2018-06-14 17:27:37 +00:00
|
|
|
#include "spdk/bit_array.h"
|
2018-06-11 20:32:15 +00:00
|
|
|
#include "spdk/thread.h"
|
2016-09-19 17:01:52 +00:00
|
|
|
#include "spdk/nvmf.h"
|
2018-05-07 18:26:13 +00:00
|
|
|
#include "spdk/endian.h"
|
|
|
|
#include "spdk/string.h"
|
2020-10-06 16:16:26 +00:00
|
|
|
#include "spdk/log.h"
|
2021-03-31 07:37:25 +00:00
|
|
|
#include "spdk_internal/usdt.h"
|
2016-11-07 22:10:28 +00:00
|
|
|
|
2017-08-29 20:22:37 +00:00
|
|
|
#include "nvmf_internal.h"
|
2016-07-14 22:25:23 +00:00
|
|
|
#include "transport.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_LOG_REGISTER_COMPONENT(nvmf)
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2018-05-08 23:05:28 +00:00
|
|
|
#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
|
2017-08-17 20:39:27 +00:00
|
|
|
|
2019-08-15 16:52:20 +00:00
|
|
|
static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);
|
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
|
|
|
|
|
|
|
|
/* supplied to a single call to nvmf_qpair_disconnect */
struct nvmf_qpair_disconnect_ctx {
	struct spdk_nvmf_qpair *qpair;	/* qpair being torn down */
	struct spdk_nvmf_ctrlr *ctrlr;	/* owning controller, if any */
	nvmf_qpair_disconnect_cb cb_fn;	/* user completion callback */
	struct spdk_thread *thread;	/* thread to run cb_fn on */
	void *ctx;			/* opaque argument for cb_fn */
	uint16_t qid;			/* queue id of the qpair */
};
|
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
/*
 * There are several times when we need to iterate through the list of all qpairs and selectively delete them.
 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from
 * to enable calling nvmf_qpair_disconnect on the next desired qpair.
 */
struct nvmf_qpair_disconnect_many_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem whose qpairs are targeted */
	struct spdk_nvmf_poll_group *group;	/* poll group being iterated */
	spdk_nvmf_poll_group_mod_done cpl_fn;	/* completion callback when all done */
	void *cpl_ctx;				/* opaque argument for cpl_fn */
};
|
|
|
|
|
2018-08-24 17:16:23 +00:00
|
|
|
/* Transition a qpair to a new state.  Must be called on the qpair's poll
 * group thread; the asserts enforce that single-threaded ownership. */
static void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
		     enum spdk_nvmf_qpair_state state)
{
	assert(qpair != NULL);
	/* qpair state is only touched from its owning poll group thread. */
	assert(qpair->group->thread == spdk_get_thread());

	qpair->state = state;
}
|
|
|
|
|
2018-03-13 00:16:47 +00:00
|
|
|
static int
|
2020-05-09 21:31:37 +00:00
|
|
|
nvmf_poll_group_poll(void *ctx)
|
2017-11-17 17:01:39 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_poll_group *group = ctx;
|
|
|
|
int rc;
|
2018-03-13 00:16:47 +00:00
|
|
|
int count = 0;
|
2017-11-17 17:01:39 +00:00
|
|
|
struct spdk_nvmf_transport_poll_group *tgroup;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
|
2020-05-15 01:14:26 +00:00
|
|
|
rc = nvmf_transport_poll_group_poll(tgroup);
|
2017-11-17 17:01:39 +00:00
|
|
|
if (rc < 0) {
|
2020-05-04 09:51:27 +00:00
|
|
|
return SPDK_POLLER_BUSY;
|
2017-11-17 17:01:39 +00:00
|
|
|
}
|
2018-03-13 00:16:47 +00:00
|
|
|
count += rc;
|
2017-11-17 17:01:39 +00:00
|
|
|
}
|
2018-03-13 00:16:47 +00:00
|
|
|
|
2020-05-04 09:51:27 +00:00
|
|
|
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
|
2017-11-17 17:01:39 +00:00
|
|
|
}
|
|
|
|
|
2022-02-07 21:17:00 +00:00
|
|
|
/*
 * Reset and clean up the poll group (I/O channel code will actually free the
 * group).
 */
static void
nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t sid, nsid;

	/* Tear down every per-transport poll group. */
	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		nvmf_transport_poll_group_destroy(tgroup);
	}

	/* Release per-subsystem state: each namespace may hold a bdev
	 * I/O channel reference that must be returned. */
	for (sid = 0; sid < group->num_sgroups; sid++) {
		sgroup = &group->sgroups[sid];

		assert(sgroup != NULL);

		for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
			if (sgroup->ns_info[nsid].channel) {
				spdk_put_io_channel(sgroup->ns_info[nsid].channel);
				sgroup->ns_info[nsid].channel = NULL;
			}
		}

		free(sgroup->ns_info);
	}

	free(group->sgroups);

	spdk_poller_unregister(&group->poller);

	/* Notify anyone waiting for this group's destruction. */
	if (group->destroy_cb_fn) {
		group->destroy_cb_fn(group->destroy_cb_arg, 0);
	}
}
|
|
|
|
|
2022-02-07 21:17:00 +00:00
|
|
|
/*
 * Callback to unregister a poll group from the target, and clean up its state.
 */
static void
nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;

	SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread));

	/* Unlink the group from the target under the lock; poll groups are
	 * created/destroyed from multiple threads. */
	pthread_mutex_lock(&tgt->mutex);
	TAILQ_REMOVE(&tgt->poll_groups, group, link);
	tgt->num_poll_groups--;
	pthread_mutex_unlock(&tgt->mutex);

	/* A group must not disappear while the target is mid pause/resume. */
	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));
	nvmf_tgt_cleanup_poll_group(group);
}
|
|
|
|
|
2022-07-13 13:19:09 +00:00
|
|
|
/* Add a transport to a poll group by creating its per-transport poll group.
 * Idempotent: returns 0 without side effects if the transport is already
 * present.  Returns -1 if the transport poll group cannot be created. */
static int
nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == transport) {
			/* Transport already in the poll group */
			return 0;
		}
	}

	tgroup = nvmf_transport_poll_group_create(transport, group);
	if (!tgroup) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -1;
	}
	SPDK_DTRACE_PROBE2_TICKS(nvmf_transport_poll_group_create, transport,
				 spdk_thread_get_id(group->thread));

	tgroup->group = group;
	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

	return 0;
}
|
|
|
|
|
2017-11-17 17:01:39 +00:00
|
|
|
/* I/O channel create callback: initialize a per-thread poll group for the
 * target, attaching every registered transport and subsystem.  On any
 * failure the partially-built group is torn down via
 * nvmf_tgt_cleanup_poll_group() and a negative errno (or -1) is returned. */
static int
nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	struct spdk_thread *thread = spdk_get_thread();
	uint32_t sid;
	int rc;

	group->tgt = tgt;
	TAILQ_INIT(&group->tgroups);
	TAILQ_INIT(&group->qpairs);
	group->thread = thread;
	pthread_mutex_init(&group->mutex, NULL);

	/* Register the poller before adding transports; cleanup on error
	 * paths below unregisters it. */
	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);

	SPDK_DTRACE_PROBE1_TICKS(nvmf_create_poll_group, spdk_thread_get_id(thread));

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		rc = nvmf_poll_group_add_transport(group, transport);
		if (rc != 0) {
			nvmf_tgt_cleanup_poll_group(group);
			return rc;
		}
	}

	/* One subsystem poll group slot per possible subsystem id. */
	group->num_sgroups = tgt->max_subsystems;
	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
	if (!group->sgroups) {
		nvmf_tgt_cleanup_poll_group(group);
		return -ENOMEM;
	}

	for (sid = 0; sid < tgt->max_subsystems; sid++) {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = tgt->subsystems[sid];
		if (!subsystem) {
			continue;
		}

		if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
			nvmf_tgt_cleanup_poll_group(group);
			return -1;
		}
	}

	/* Publish the fully-initialized group to the target. */
	pthread_mutex_lock(&tgt->mutex);
	tgt->num_poll_groups++;
	TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
	pthread_mutex_unlock(&tgt->mutex);

	return 0;
}
|
|
|
|
|
2018-06-29 23:19:45 +00:00
|
|
|
/* Disconnect every qpair on a poll group, re-posting itself as a thread
 * message until the group's qpair list drains, then drop the group's I/O
 * channel reference (which eventually triggers poll group destruction). */
static void
_nvmf_tgt_disconnect_qpairs(void *ctx)
{
	struct spdk_nvmf_qpair *qpair, *qpair_tmp;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
	struct spdk_io_channel *ch;
	int rc;

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
		rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		/* -EINPROGRESS means the disconnect was already started;
		 * any other error aborts this pass. */
		if (rc && rc != -EINPROGRESS) {
			break;
		}
	}

	if (TAILQ_EMPTY(&group->qpairs)) {
		/* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */
		ch = spdk_io_channel_from_ctx(group);
		spdk_put_io_channel(ch);
		free(qpair_ctx);
		return;
	}

	/* Some qpairs are in process of being disconnected. Send a message and try to remove them again */
	spdk_thread_send_msg(spdk_get_thread(), _nvmf_tgt_disconnect_qpairs, ctx);
}
|
|
|
|
|
|
|
|
/* Kick off asynchronous disconnection of all qpairs on a poll group.
 * NOTE(review): on allocation failure this only logs — the group's qpairs
 * are then never disconnected; presumably acceptable only at shutdown. */
static void
nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
{
	struct nvmf_qpair_disconnect_many_ctx *ctx;

	SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread));

	/* ctx is freed by _nvmf_tgt_disconnect_qpairs once the list drains. */
	ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n");
		return;
	}

	ctx->group = group;
	_nvmf_tgt_disconnect_qpairs(ctx);
}
|
|
|
|
|
2017-08-18 22:38:33 +00:00
|
|
|
struct spdk_nvmf_tgt *
|
2019-08-15 20:51:08 +00:00
|
|
|
spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
|
2016-06-06 21:44:30 +00:00
|
|
|
{
|
2019-08-15 17:34:12 +00:00
|
|
|
struct spdk_nvmf_tgt *tgt, *tmp_tgt;
|
|
|
|
|
|
|
|
if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
|
|
|
|
SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link) {
|
2019-09-16 21:26:29 +00:00
|
|
|
if (!strncmp(opts->name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH)) {
|
2019-08-15 17:34:12 +00:00
|
|
|
SPDK_ERRLOG("Provided target name must be unique.\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
2017-08-18 22:38:33 +00:00
|
|
|
|
2017-08-23 17:35:44 +00:00
|
|
|
tgt = calloc(1, sizeof(*tgt));
|
|
|
|
if (!tgt) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-08-18 22:38:33 +00:00
|
|
|
|
2019-08-15 17:34:12 +00:00
|
|
|
snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH, "%s", opts->name);
|
|
|
|
|
2019-08-15 20:51:08 +00:00
|
|
|
if (!opts || !opts->max_subsystems) {
|
2018-10-19 20:19:09 +00:00
|
|
|
tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS;
|
2017-08-17 20:39:27 +00:00
|
|
|
} else {
|
2019-08-15 20:51:08 +00:00
|
|
|
tgt->max_subsystems = opts->max_subsystems;
|
2017-08-17 20:39:27 +00:00
|
|
|
}
|
|
|
|
|
2021-05-24 04:22:43 +00:00
|
|
|
if (!opts) {
|
|
|
|
tgt->crdt[0] = 0;
|
|
|
|
tgt->crdt[1] = 0;
|
|
|
|
tgt->crdt[2] = 0;
|
|
|
|
} else {
|
|
|
|
tgt->crdt[0] = opts->crdt[0];
|
|
|
|
tgt->crdt[1] = opts->crdt[1];
|
|
|
|
tgt->crdt[2] = opts->crdt[2];
|
|
|
|
}
|
|
|
|
|
2021-08-12 16:01:39 +00:00
|
|
|
if (!opts) {
|
|
|
|
tgt->discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY;
|
|
|
|
} else {
|
|
|
|
tgt->discovery_filter = opts->discovery_filter;
|
|
|
|
}
|
|
|
|
|
2017-08-18 22:38:33 +00:00
|
|
|
tgt->discovery_genctr = 0;
|
|
|
|
TAILQ_INIT(&tgt->transports);
|
2020-05-27 21:59:54 +00:00
|
|
|
TAILQ_INIT(&tgt->poll_groups);
|
2023-03-24 21:53:08 +00:00
|
|
|
tgt->num_poll_groups = 0;
|
2016-07-25 21:22:58 +00:00
|
|
|
|
2018-10-19 20:19:09 +00:00
|
|
|
tgt->subsystems = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
|
2018-05-08 23:05:28 +00:00
|
|
|
if (!tgt->subsystems) {
|
|
|
|
free(tgt);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-05-27 21:59:54 +00:00
|
|
|
pthread_mutex_init(&tgt->mutex, NULL);
|
|
|
|
|
2017-11-17 17:01:39 +00:00
|
|
|
spdk_io_device_register(tgt,
|
2020-05-09 21:31:37 +00:00
|
|
|
nvmf_tgt_create_poll_group,
|
|
|
|
nvmf_tgt_destroy_poll_group,
|
2018-08-30 20:26:50 +00:00
|
|
|
sizeof(struct spdk_nvmf_poll_group),
|
2019-08-15 17:34:12 +00:00
|
|
|
tgt->name);
|
2017-11-17 17:01:39 +00:00
|
|
|
|
2022-12-13 14:16:32 +00:00
|
|
|
tgt->state = NVMF_TGT_RUNNING;
|
|
|
|
|
2020-06-29 17:56:47 +00:00
|
|
|
TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link);
|
|
|
|
|
2017-08-18 22:38:33 +00:00
|
|
|
return tgt;
|
2016-06-06 21:44:30 +00:00
|
|
|
}
|
|
|
|
|
2020-11-20 05:50:52 +00:00
|
|
|
/* Destroy the target's transports one at a time; each destroy completion
 * re-invokes this function until the list is empty, after which the target
 * itself is freed and the user's destroy callback fires. */
static void
_nvmf_tgt_destroy_next_transport(void *ctx)
{
	struct spdk_nvmf_tgt *tgt = ctx;
	struct spdk_nvmf_transport *transport;

	if (!TAILQ_EMPTY(&tgt->transports)) {
		transport = TAILQ_FIRST(&tgt->transports);
		TAILQ_REMOVE(&tgt->transports, transport, link);
		spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
	} else {
		/* Capture the callback before freeing tgt — the callback
		 * must not touch freed memory. */
		spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
		void *destroy_cb_arg = tgt->destroy_cb_arg;

		pthread_mutex_destroy(&tgt->mutex);
		free(tgt);

		if (destroy_cb_fn) {
			destroy_cb_fn(destroy_cb_arg, 0);
		}
	}
}
|
|
|
|
|
2018-06-05 22:34:04 +00:00
|
|
|
/* io_device unregister callback: destroy all subsystems, then chain into
 * transport destruction.  Re-entrant: an -EINPROGRESS subsystem destroy
 * schedules this same function as its completion, resuming the scan. */
static void
nvmf_tgt_destroy_cb(void *io_device)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	uint32_t i;
	int rc;

	if (tgt->subsystems) {
		for (i = 0; i < tgt->max_subsystems; i++) {
			if (tgt->subsystems[i]) {
				nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true);

				rc = spdk_nvmf_subsystem_destroy(tgt->subsystems[i], nvmf_tgt_destroy_cb, tgt);
				if (rc) {
					if (rc == -EINPROGRESS) {
						/* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again when subsystem #i
						 * is destroyed, nvmf_tgt_destroy_cb will continue to destroy other subsystems if any */
						return;
					} else {
						SPDK_ERRLOG("Failed to destroy subsystem %s, rc %d\n", tgt->subsystems[i]->subnqn, rc);
					}
				}
			}
		}
		free(tgt->subsystems);
	}

	_nvmf_tgt_destroy_next_transport(tgt);
}
|
|
|
|
|
|
|
|
/* Begin asynchronous destruction of a target.  cb_fn(cb_arg, 0) fires once
 * all poll groups, subsystems, and transports have been torn down. */
void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
		      void *cb_arg)
{
	/* Destroying mid pause/resume would race those state machines. */
	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));

	tgt->destroy_cb_fn = cb_fn;
	tgt->destroy_cb_arg = cb_arg;

	TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

	/* Continues in nvmf_tgt_destroy_cb once all channels are released. */
	spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);
}
|
|
|
|
|
2019-09-13 18:09:07 +00:00
|
|
|
/* Return the target's name (owned by the target; do not free). */
const char *
spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt)
{
	return tgt->name;
}
|
|
|
|
|
2019-08-15 18:32:11 +00:00
|
|
|
struct spdk_nvmf_tgt *
|
|
|
|
spdk_nvmf_get_tgt(const char *name)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_tgt *tgt;
|
|
|
|
uint32_t num_targets = 0;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(tgt, &g_nvmf_tgts, link) {
|
|
|
|
if (name) {
|
2019-09-16 21:26:29 +00:00
|
|
|
if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH)) {
|
2019-08-15 18:32:11 +00:00
|
|
|
return tgt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
num_targets++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* special case. If there is only one target and
|
|
|
|
* no name was specified, return the only available
|
|
|
|
* target. If there is more than one target, name must
|
|
|
|
* be specified.
|
|
|
|
*/
|
|
|
|
if (!name && num_targets == 1) {
|
|
|
|
return TAILQ_FIRST(&g_nvmf_tgts);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-09-13 18:09:07 +00:00
|
|
|
/* Return the first registered target, or NULL if none exist. */
struct spdk_nvmf_tgt *
spdk_nvmf_get_first_tgt(void)
{
	return TAILQ_FIRST(&g_nvmf_tgts);
}
|
|
|
|
|
|
|
|
/* Return the target after prev in registration order, or NULL at the end.
 * prev must be a valid, currently-registered target. */
struct spdk_nvmf_tgt *
spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev)
{
	return TAILQ_NEXT(prev, link);
}
|
|
|
|
|
2018-05-07 18:26:13 +00:00
|
|
|
/* Emit the RPC-replayable JSON configuration for one NVMe subsystem:
 * an nvmf_create_subsystem call, then one nvmf_subsystem_add_host per host,
 * one nvmf_subsystem_add_ns per namespace, and one
 * nvmf_subsystem_add_listener per listener.  Discovery subsystems are
 * skipped — only SPDK_NVMF_SUBTYPE_NVME is serialized. */
static void
nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
				 struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_subsystem_listener *listener;
	const struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t max_namespaces;
	char uuid_str[SPDK_UUID_STRING_LEN];

	if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) {
		return;
	}

	/* { */
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_create_subsystem");

	/* "params" : { */
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

	/* max_namespaces of 0 means "unlimited" and is omitted. */
	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
	if (max_namespaces != 0) {
		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
	}

	spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem));
	spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem));
	spdk_json_write_named_bool(w, "ana_reporting", nvmf_subsystem_get_ana_reporting(subsystem));

	/* } "params" */
	spdk_json_write_object_end(w);

	/* } */
	spdk_json_write_object_end(w);

	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/* "namespace" : { */
		spdk_json_write_named_object_begin(w, "namespace");

		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

		/* Optional identifiers are written only when non-zero. */
		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
							 from_be64(&ns_opts.nguid[8]));
		}

		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
		}

		if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) {
			spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid);
			spdk_json_write_named_string(w, "uuid", uuid_str);
		}

		if (nvmf_subsystem_get_ana_reporting(subsystem)) {
			spdk_json_write_named_uint32(w, "anagrpid", ns_opts.anagrpid);
		}

		/* "namespace" */
		spdk_json_write_object_end(w);

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
		trid = spdk_nvmf_subsystem_listener_get_trid(listener);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		nvmf_transport_listen_dump_opts(listener->transport, trid, w);

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

}
|
|
|
|
|
|
|
|
/* Serialize the whole target configuration as a sequence of replayable RPC
 * calls: nvmf_set_max_subsystems, nvmf_set_crdt, one nvmf_create_transport
 * per transport, then each subsystem's configuration. */
void
spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_transport *transport;

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_crdt");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]);
	spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]);
	spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	/* write transports */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_create_transport");
		nvmf_transport_dump_opts(transport, w, true);
		spdk_json_write_object_end(w);
	}

	subsystem = spdk_nvmf_subsystem_get_first(tgt);
	while (subsystem) {
		nvmf_write_subsystem_config_json(w, subsystem);
		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
	}
}
|
|
|
|
|
2020-12-18 19:12:49 +00:00
|
|
|
/* ABI-safe copy of listen opts: only fields that fit inside the caller's
 * declared opts_size are copied, so older callers with smaller structs
 * remain compatible. */
static void
nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts,
		      const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size)
{
	assert(opts);
	assert(opts_src);

	opts->opts_size = opts_size;

/* Copy a field only if it lies entirely within opts_size. */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = opts_src->field; \
	} \

	SET_FIELD(transport_specific);
#undef SET_FIELD

	/* Do not remove this statement, you should always update this statement when you adding a new field,
	 * and do not forget to add the SET_FIELD statement for your added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 16, "Incorrect size");
}
|
|
|
|
|
2020-12-21 13:07:39 +00:00
|
|
|
/* Initialize caller-provided listen opts to defaults, honoring the caller's
 * opts_size for ABI compatibility. */
void
spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size)
{
	struct spdk_nvmf_listen_opts opts_local = {};

	/* local version of opts should have defaults set here */

	nvmf_listen_opts_copy(opts, &opts_local, opts_size);
}
|
|
|
|
|
2020-02-13 19:44:00 +00:00
|
|
|
/* Start listening on trid using the matching registered transport.
 * opts must be non-NULL with a non-zero opts_size.  Returns 0 on success,
 * -EINVAL on bad arguments or unknown transport, or the transport's error. */
int
spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid,
			 struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_transport *transport;
	int rc;
	struct spdk_nvmf_listen_opts opts_local = {};

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return -EINVAL;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
		return -EINVAL;
	}

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	/* Normalize into a full-size local copy so the transport always sees
	 * the current struct layout regardless of the caller's opts_size. */
	nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size);
	rc = spdk_nvmf_transport_listen(transport, trid, &opts_local);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
	}

	return rc;
}
|
2018-04-03 18:35:04 +00:00
|
|
|
|
2020-01-02 19:00:45 +00:00
|
|
|
/* Stop listening on trid.  Returns 0 on success, -EINVAL if no transport
 * matches trid, or the transport's error code. */
int
spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt,
			  struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_transport *transport;
	int rc;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	rc = spdk_nvmf_transport_stop_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr);
		return rc;
	}
	return 0;
}
|
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
/* Context threaded through the spdk_for_each_channel iteration that adds
 * (or, on failure, rolls back) a transport across all poll groups. */
struct spdk_nvmf_tgt_add_transport_ctx {
	struct spdk_nvmf_tgt *tgt;		/* target being modified */
	struct spdk_nvmf_transport *transport;	/* transport being added */
	spdk_nvmf_tgt_add_transport_done_fn cb_fn;	/* user completion */
	void *cb_arg;				/* opaque argument for cb_fn */
	int status;				/* first failure, reported after rollback */
};
|
|
|
|
|
2021-11-26 11:51:14 +00:00
|
|
|
/* Completion of the rollback iteration: report the original failure status
 * (saved in ctx->status) to the user and release the context. */
static void
_nvmf_tgt_remove_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cb_fn(ctx->cb_arg, ctx->status);
	free(ctx);
}
|
|
|
|
|
|
|
|
/* Per-channel rollback step: remove and destroy this poll group's
 * per-transport group for the transport that failed to be added. */
static void
_nvmf_tgt_remove_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		if (tgroup->transport == ctx->transport) {
			TAILQ_REMOVE(&group->tgroups, tgroup, link);
			nvmf_transport_poll_group_destroy(tgroup);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}
|
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
/* Completion of the per-poll-group add pass. On success, link the transport
 * into the target and report success. On failure, kick off a second
 * spdk_for_each_channel() pass to undo any partially-created transport poll
 * groups; the user callback then fires from _nvmf_tgt_remove_transport_done. */
static void
_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (status) {
		/* Save the failure; the rollback completion reports it.
		 * ctx ownership passes to the rollback pass — do not free here. */
		ctx->status = status;
		spdk_for_each_channel(ctx->tgt,
				      _nvmf_tgt_remove_transport,
				      ctx,
				      _nvmf_tgt_remove_transport_done);
		return;
	}

	ctx->transport->tgt = ctx->tgt;
	TAILQ_INSERT_TAIL(&ctx->tgt->transports, ctx->transport, link);
	ctx->cb_fn(ctx->cb_arg, status);
	free(ctx);
}
|
|
|
|
|
|
|
|
static void
|
2020-05-09 21:31:37 +00:00
|
|
|
_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
|
2018-08-27 22:27:47 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
|
|
|
|
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
|
|
|
|
struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
|
|
|
|
int rc;
|
|
|
|
|
2020-05-09 22:57:13 +00:00
|
|
|
rc = nvmf_poll_group_add_transport(group, ctx->transport);
|
2018-08-27 22:27:47 +00:00
|
|
|
spdk_for_each_channel_continue(i, rc);
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
void
|
|
|
|
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
spdk_nvmf_tgt_add_transport_done_fn cb_fn,
|
|
|
|
void *cb_arg)
|
2018-08-27 22:27:47 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_tgt_add_transport_ctx *ctx;
|
|
|
|
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_add_transport, transport, tgt->name);
|
2021-11-15 11:25:38 +00:00
|
|
|
|
2020-01-07 17:36:40 +00:00
|
|
|
if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) {
|
2018-08-27 22:27:47 +00:00
|
|
|
cb_fn(cb_arg, -EEXIST);
|
|
|
|
return; /* transport already created */
|
2017-11-17 17:01:39 +00:00
|
|
|
}
|
2018-08-27 22:27:47 +00:00
|
|
|
|
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (!ctx) {
|
|
|
|
cb_fn(cb_arg, -ENOMEM);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->tgt = tgt;
|
|
|
|
ctx->transport = transport;
|
|
|
|
ctx->cb_fn = cb_fn;
|
|
|
|
ctx->cb_arg = cb_arg;
|
|
|
|
|
|
|
|
spdk_for_each_channel(tgt,
|
2020-05-09 21:31:37 +00:00
|
|
|
_nvmf_tgt_add_transport,
|
2018-08-27 22:27:47 +00:00
|
|
|
ctx,
|
2020-05-09 21:31:37 +00:00
|
|
|
_nvmf_tgt_add_transport_done);
|
2017-08-21 18:41:51 +00:00
|
|
|
}
|
|
|
|
|
2022-12-13 14:16:32 +00:00
|
|
|
/* Context shared by the pause and resume polling paths; carried through
 * spdk_for_each_channel() and freed by the respective *_done callback. */
struct nvmf_tgt_pause_ctx {
	struct spdk_nvmf_tgt *tgt;
	spdk_nvmf_tgt_pause_polling_cb_fn cb_fn;
	void *cb_arg;
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_tgt_pause_polling_done(struct spdk_io_channel_iter *i, int status)
|
|
|
|
{
|
|
|
|
struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
|
|
|
|
|
|
|
|
ctx->tgt->state = NVMF_TGT_PAUSED;
|
|
|
|
|
|
|
|
ctx->cb_fn(ctx->cb_arg, status);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_tgt_pause_polling(struct spdk_io_channel_iter *i)
|
|
|
|
{
|
|
|
|
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
|
|
|
|
struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
|
|
|
spdk_poller_unregister(&group->poller);
|
|
|
|
|
|
|
|
spdk_for_each_channel_continue(i, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_tgt_pause_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_pause_polling_cb_fn cb_fn,
|
|
|
|
void *cb_arg)
|
|
|
|
{
|
|
|
|
struct nvmf_tgt_pause_ctx *ctx;
|
|
|
|
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_pause_polling, tgt, tgt->name);
|
2022-12-13 14:16:32 +00:00
|
|
|
|
|
|
|
switch (tgt->state) {
|
|
|
|
case NVMF_TGT_PAUSING:
|
|
|
|
case NVMF_TGT_RESUMING:
|
|
|
|
return -EBUSY;
|
|
|
|
case NVMF_TGT_RUNNING:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (!ctx) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
tgt->state = NVMF_TGT_PAUSING;
|
|
|
|
|
|
|
|
ctx->tgt = tgt;
|
|
|
|
ctx->cb_fn = cb_fn;
|
|
|
|
ctx->cb_arg = cb_arg;
|
|
|
|
|
|
|
|
spdk_for_each_channel(tgt,
|
|
|
|
_nvmf_tgt_pause_polling,
|
|
|
|
ctx,
|
|
|
|
_nvmf_tgt_pause_polling_done);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_tgt_resume_polling_done(struct spdk_io_channel_iter *i, int status)
|
|
|
|
{
|
|
|
|
struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
|
|
|
|
|
|
|
|
ctx->tgt->state = NVMF_TGT_RUNNING;
|
|
|
|
|
|
|
|
ctx->cb_fn(ctx->cb_arg, status);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_tgt_resume_polling(struct spdk_io_channel_iter *i)
|
|
|
|
{
|
|
|
|
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
|
|
|
|
struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
|
|
|
|
|
|
|
|
assert(group->poller == NULL);
|
|
|
|
group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
|
|
|
|
|
|
|
|
spdk_for_each_channel_continue(i, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_tgt_resume_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_resume_polling_cb_fn cb_fn,
|
|
|
|
void *cb_arg)
|
|
|
|
{
|
|
|
|
struct nvmf_tgt_pause_ctx *ctx;
|
|
|
|
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_resume_polling, tgt, tgt->name);
|
2022-12-13 14:16:32 +00:00
|
|
|
|
|
|
|
switch (tgt->state) {
|
|
|
|
case NVMF_TGT_PAUSING:
|
|
|
|
case NVMF_TGT_RESUMING:
|
|
|
|
return -EBUSY;
|
|
|
|
case NVMF_TGT_PAUSED:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (!ctx) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
tgt->state = NVMF_TGT_RESUMING;
|
|
|
|
|
|
|
|
ctx->tgt = tgt;
|
|
|
|
ctx->cb_fn = cb_fn;
|
|
|
|
ctx->cb_arg = cb_arg;
|
|
|
|
|
|
|
|
spdk_for_each_channel(tgt,
|
|
|
|
_nvmf_tgt_resume_polling,
|
|
|
|
ctx,
|
|
|
|
_nvmf_tgt_resume_polling_done);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-18 23:05:13 +00:00
|
|
|
struct spdk_nvmf_subsystem *
|
|
|
|
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_subsystem *subsystem;
|
2017-08-29 20:03:13 +00:00
|
|
|
uint32_t sid;
|
2017-08-18 23:05:13 +00:00
|
|
|
|
|
|
|
if (!subnqn) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-01-10 23:26:48 +00:00
|
|
|
/* Ensure that subnqn is null terminated */
|
|
|
|
if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
|
|
|
|
SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-10-19 20:19:09 +00:00
|
|
|
for (sid = 0; sid < tgt->max_subsystems; sid++) {
|
2017-08-29 20:03:13 +00:00
|
|
|
subsystem = tgt->subsystems[sid];
|
|
|
|
if (subsystem == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-08-18 23:05:13 +00:00
|
|
|
if (strcmp(subnqn, subsystem->subnqn) == 0) {
|
|
|
|
return subsystem;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-07-24 23:30:07 +00:00
|
|
|
struct spdk_nvmf_transport *
|
2020-01-07 17:36:40 +00:00
|
|
|
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
|
2017-07-24 23:30:07 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_transport *transport;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(transport, &tgt->transports, link) {
|
2020-01-07 17:36:40 +00:00
|
|
|
if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN)) {
|
2017-07-24 23:30:07 +00:00
|
|
|
return transport;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-06-08 21:38:29 +00:00
|
|
|
/* Message context used to hand a freshly accepted qpair to its chosen poll
 * group's thread; freed by _nvmf_poll_group_add. */
struct nvmf_new_qpair_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_poll_group *group;
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
_nvmf_poll_group_add(void *_ctx)
|
|
|
|
{
|
|
|
|
struct nvmf_new_qpair_ctx *ctx = _ctx;
|
|
|
|
struct spdk_nvmf_qpair *qpair = ctx->qpair;
|
|
|
|
struct spdk_nvmf_poll_group *group = ctx->group;
|
|
|
|
|
|
|
|
free(_ctx);
|
|
|
|
|
|
|
|
if (spdk_nvmf_poll_group_add(group, qpair) != 0) {
|
|
|
|
SPDK_ERRLOG("Unable to add the qpair to a poll group.\n");
|
|
|
|
spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-08 21:57:25 +00:00
|
|
|
/* Assign a newly accepted qpair to a poll group and hand it off to that
 * group's thread. Prefers the transport's "optimal" group; falls back to
 * round-robin over the target's poll groups. On any failure the qpair is
 * disconnected. */
void
spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_poll_group *group;
	struct nvmf_new_qpair_ctx *ctx;

	group = spdk_nvmf_get_optimal_poll_group(qpair);
	if (group == NULL) {
		/* Round-robin fallback: wrap to the first poll group when the
		 * cursor runs off the end of the list. */
		if (tgt->next_poll_group == NULL) {
			tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
			if (tgt->next_poll_group == NULL) {
				SPDK_ERRLOG("No poll groups exist.\n");
				spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
				return;
			}
		}
		group = tgt->next_poll_group;
		tgt->next_poll_group = TAILQ_NEXT(group, link);
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to send message to poll group.\n");
		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		return;
	}

	ctx->qpair = qpair;
	ctx->group = group;

	/* The qpair is not yet associated with a controller; count it under the
	 * group's mutex since this thread is not the group's thread. */
	pthread_mutex_lock(&group->mutex);
	group->current_unassociated_qpairs++;
	pthread_mutex_unlock(&group->mutex);

	/* Completion of the add happens on the poll group's thread. */
	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
}
|
|
|
|
|
2017-08-28 23:24:33 +00:00
|
|
|
struct spdk_nvmf_poll_group *
|
|
|
|
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
|
|
|
|
{
|
2017-11-17 17:01:39 +00:00
|
|
|
struct spdk_io_channel *ch;
|
2017-08-28 23:24:33 +00:00
|
|
|
|
2017-11-17 17:01:39 +00:00
|
|
|
ch = spdk_get_io_channel(tgt);
|
|
|
|
if (!ch) {
|
|
|
|
SPDK_ERRLOG("Unable to get I/O channel for target\n");
|
2017-08-28 23:24:33 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-11-17 17:01:39 +00:00
|
|
|
return spdk_io_channel_get_ctx(ch);
|
2017-08-28 23:24:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 10:03:16 +00:00
|
|
|
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group,
|
|
|
|
spdk_nvmf_poll_group_destroy_done_fn cb_fn,
|
|
|
|
void *cb_arg)
|
2017-08-28 23:24:33 +00:00
|
|
|
{
|
2020-02-13 10:03:16 +00:00
|
|
|
assert(group->destroy_cb_fn == NULL);
|
|
|
|
group->destroy_cb_fn = cb_fn;
|
|
|
|
group->destroy_cb_arg = cb_arg;
|
|
|
|
|
2018-07-23 22:40:02 +00:00
|
|
|
/* This function will put the io_channel associated with this poll group */
|
2020-05-09 21:31:37 +00:00
|
|
|
nvmf_tgt_destroy_poll_group_qpairs(group);
|
2017-08-28 23:24:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
int rc = -1;
|
|
|
|
struct spdk_nvmf_transport_poll_group *tgroup;
|
|
|
|
|
2018-05-11 00:18:40 +00:00
|
|
|
TAILQ_INIT(&qpair->outstanding);
|
2017-11-29 20:59:10 +00:00
|
|
|
qpair->group = group;
|
2020-10-01 00:20:38 +00:00
|
|
|
qpair->ctrlr = NULL;
|
|
|
|
qpair->disconnect_started = false;
|
2017-11-29 20:59:10 +00:00
|
|
|
|
2017-08-28 23:24:33 +00:00
|
|
|
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
|
|
|
|
if (tgroup->transport == qpair->transport) {
|
2020-05-15 01:14:26 +00:00
|
|
|
rc = nvmf_transport_poll_group_add(tgroup, qpair);
|
2017-08-28 23:24:33 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-25 01:40:58 +00:00
|
|
|
/* We add the qpair to the group only it is successfully added into the tgroup */
|
2018-05-22 17:07:43 +00:00
|
|
|
if (rc == 0) {
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread));
|
2018-12-17 18:40:31 +00:00
|
|
|
TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
|
2020-05-09 21:31:37 +00:00
|
|
|
nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE);
|
2018-05-22 17:07:43 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 23:24:33 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2020-08-01 23:00:40 +00:00
|
|
|
/* Thread-message trampoline: destroy the controller on the subsystem's
 * thread. */
static void
_nvmf_ctrlr_destruct(void *ctx)
{
	nvmf_ctrlr_destruct((struct spdk_nvmf_ctrlr *)ctx);
}
|
|
|
|
|
2018-06-14 17:27:37 +00:00
|
|
|
static void
|
2020-05-09 21:31:37 +00:00
|
|
|
_nvmf_ctrlr_free_from_qpair(void *ctx)
|
2018-06-14 17:27:37 +00:00
|
|
|
{
|
2018-06-29 19:09:47 +00:00
|
|
|
struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
|
|
|
|
struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
|
2018-07-31 03:55:40 +00:00
|
|
|
uint32_t count;
|
2018-06-14 17:27:37 +00:00
|
|
|
|
2018-07-31 03:55:40 +00:00
|
|
|
spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
|
|
|
|
count = spdk_bit_array_count_set(ctrlr->qpair_mask);
|
|
|
|
if (count == 0) {
|
2021-03-03 08:48:19 +00:00
|
|
|
assert(!ctrlr->in_destruct);
|
2021-10-26 07:45:34 +00:00
|
|
|
SPDK_DEBUGLOG(nvmf, "Last qpair %u, destroy ctrlr 0x%hx\n", qpair_ctx->qid, ctrlr->cntlid);
|
2020-08-07 01:22:38 +00:00
|
|
|
ctrlr->in_destruct = true;
|
2018-07-31 03:55:40 +00:00
|
|
|
spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
|
|
|
|
}
|
2021-10-26 07:45:34 +00:00
|
|
|
free(qpair_ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Called when the transport has finished tearing down a qpair. Clears the
 * controller's admin-qpair pointer if needed, hands the context to
 * _nvmf_ctrlr_free_from_qpair (possibly on another thread), and finally
 * invokes the user's disconnect callback on its original thread. */
static void
_nvmf_transport_qpair_fini_complete(void *cb_ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx;
	struct spdk_nvmf_ctrlr *ctrlr;
	/* Store cb args since cb_ctx can be freed in _nvmf_ctrlr_free_from_qpair */
	nvmf_qpair_disconnect_cb cb_fn = qpair_ctx->cb_fn;
	void *cb_arg = qpair_ctx->ctx;
	struct spdk_thread *cb_thread = qpair_ctx->thread;

	ctrlr = qpair_ctx->ctrlr;
	SPDK_DEBUGLOG(nvmf, "Finish destroying qid %u\n", qpair_ctx->qid);

	if (ctrlr) {
		if (qpair_ctx->qid == 0) {
			/* Admin qpair is removed, so set the pointer to NULL.
			 * This operation is safe since we are on ctrlr thread now, admin qpair's thread is the same
			 * as controller's thread */
			assert(ctrlr->thread == spdk_get_thread());
			ctrlr->admin_qpair = NULL;
		}
		/* Free qpair id from controller's bit mask and destroy the controller if it is the last qpair */
		if (ctrlr->thread) {
			spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx);
		} else {
			_nvmf_ctrlr_free_from_qpair(qpair_ctx);
		}
	} else {
		/* No controller was ever associated; nothing to release but the ctx. */
		free(qpair_ctx);
	}

	if (cb_fn) {
		spdk_thread_send_msg(cb_thread, cb_fn, cb_arg);
	}
}
|
|
|
|
|
2020-06-04 14:13:06 +00:00
|
|
|
/* Detach a qpair from its poll group: move it to the ERROR state, remove it
 * from its transport poll group, and unlink it from the group's qpair list. */
void
spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;
	int rc;

	SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_remove_qpair, qpair,
				 spdk_thread_get_id(qpair->group->thread));
	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);

	/* Find the tgroup and remove the qpair from the tgroup */
	TAILQ_FOREACH(tgroup, &qpair->group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			rc = nvmf_transport_poll_group_remove(tgroup, qpair);
			/* NOTE(review): this compares against positive ENOTSUP while SPDK
			 * transport callbacks conventionally return negative errnos —
			 * confirm whether -ENOTSUP was intended here. */
			if (rc && (rc != ENOTSUP)) {
				SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
					    qpair, tgroup);
			}
			break;
		}
	}

	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);
	qpair->group = NULL;
}
|
|
|
|
|
2023-03-17 07:02:24 +00:00
|
|
|
static void
|
|
|
|
_nvmf_qpair_sgroup_req_clean(struct spdk_nvmf_subsystem_poll_group *sgroup,
|
|
|
|
const struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_request *req, *tmp;
|
|
|
|
TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
|
|
|
|
if (req->qpair == qpair) {
|
|
|
|
TAILQ_REMOVE(&sgroup->queued, req, link);
|
|
|
|
if (nvmf_transport_req_free(req)) {
|
|
|
|
SPDK_ERRLOG("Transport request free error!\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-04 14:13:06 +00:00
|
|
|
/* State callback invoked once a deactivating qpair has drained: update the
 * poll group's qpair accounting, purge any of the qpair's queued requests
 * from the subsystem poll groups, detach the qpair from its poll group, and
 * ask the transport to finish destruction (completion continues in
 * _nvmf_transport_qpair_fini_complete). */
static void
_nvmf_qpair_destroy(void *ctx, int status)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	uint32_t sid;

	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
	/* Save the qid now; the qpair may be gone by the time the fini
	 * completion needs it. */
	qpair_ctx->qid = qpair->qid;

	if (qpair->connect_received) {
		/* CONNECT completed: the qpair was counted as admin or I/O. */
		if (0 == qpair->qid) {
			assert(qpair->group->stat.current_admin_qpairs > 0);
			qpair->group->stat.current_admin_qpairs--;
		} else {
			assert(qpair->group->stat.current_io_qpairs > 0);
			qpair->group->stat.current_io_qpairs--;
		}
	} else {
		/* Never associated with a controller; the counter is shared across
		 * threads, hence the mutex. */
		pthread_mutex_lock(&qpair->group->mutex);
		qpair->group->current_unassociated_qpairs--;
		pthread_mutex_unlock(&qpair->group->mutex);
	}

	if (ctrlr) {
		/* Subsystem is known — clean only its poll group. */
		sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
		_nvmf_qpair_sgroup_req_clean(sgroup, qpair);
	} else {
		/* Unknown subsystem — sweep every subsystem poll group. */
		for (sid = 0; sid < qpair->group->num_sgroups; sid++) {
			sgroup = &qpair->group->sgroups[sid];
			assert(sgroup != NULL);
			_nvmf_qpair_sgroup_req_clean(sgroup, qpair);
		}
	}

	qpair_ctx->ctrlr = ctrlr;
	spdk_nvmf_poll_group_remove(qpair);
	nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
}
|
|
|
|
|
2020-08-05 19:13:00 +00:00
|
|
|
static void
|
|
|
|
_nvmf_qpair_disconnect_msg(void *ctx)
|
|
|
|
{
|
|
|
|
struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
|
|
|
|
|
|
|
|
spdk_nvmf_qpair_disconnect(qpair_ctx->qpair, qpair_ctx->cb_fn, qpair_ctx->ctx);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
|
2023-03-13 08:36:05 +00:00
|
|
|
/* Deprecation notice emitted when spdk_nvmf_qpair_disconnect() is called
 * with a non-NULL cb_fn/ctx (see the check in that function). */
SPDK_LOG_DEPRECATION_REGISTER(spdk_nvmf_qpair_disconnect, "cb_fn and ctx are deprecated", "v23.09",
			      0);
|
|
|
|
|
2018-06-29 19:09:47 +00:00
|
|
|
/* Disconnect a qpair. Idempotent per qpair: a second call returns
 * -EINPROGRESS. Must ultimately run on the qpair's poll group thread; when
 * called from another thread the work is re-dispatched there. If the qpair
 * has outstanding requests, destruction is deferred until they drain via
 * the qpair's state callback; otherwise it is destroyed immediately.
 * Returns 0 on success (or when disconnect has been scheduled), -ENOMEM on
 * allocation failure. cb_fn/ctx are deprecated (see the registration above). */
int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	struct spdk_nvmf_poll_group *group = qpair->group;
	struct nvmf_qpair_disconnect_ctx *qpair_ctx;

	/* Atomically claim the disconnect; loser of the race backs off. */
	if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED)) {
		return -EINPROGRESS;
	}

	if (cb_fn || ctx) {
		SPDK_LOG_DEPRECATED(spdk_nvmf_qpair_disconnect);
	}

	/* If we get a qpair in the uninitialized state, we can just destroy it immediately */
	if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
		nvmf_transport_qpair_fini(qpair, NULL, NULL);
		if (cb_fn) {
			cb_fn(ctx);
		}
		return 0;
	}

	assert(group != NULL);
	if (spdk_get_thread() != group->thread) {
		/* clear the atomic so we can set it on the next call on the proper thread. */
		__atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED);
		qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
		if (!qpair_ctx) {
			SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
			return -ENOMEM;
		}
		qpair_ctx->qpair = qpair;
		qpair_ctx->cb_fn = cb_fn;
		qpair_ctx->thread = group->thread;
		qpair_ctx->ctx = ctx;
		/* Re-run this function on the poll group's thread. */
		spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx);
		return 0;
	}

	SPDK_DTRACE_PROBE2_TICKS(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread));
	assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
	if (!qpair_ctx) {
		SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n");
		return -ENOMEM;
	}

	qpair_ctx->qpair = qpair;
	qpair_ctx->cb_fn = cb_fn;
	qpair_ctx->thread = group->thread;
	qpair_ctx->ctx = ctx;

	/* Check for outstanding I/O */
	if (!TAILQ_EMPTY(&qpair->outstanding)) {
		/* Defer destruction until the drain completes; _nvmf_qpair_destroy
		 * is invoked via the state callback. */
		SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread));
		qpair->state_cb = _nvmf_qpair_destroy;
		qpair->state_cb_arg = qpair_ctx;
		nvmf_qpair_abort_pending_zcopy_reqs(qpair);
		nvmf_qpair_free_aer(qpair);
		return 0;
	}

	_nvmf_qpair_destroy(qpair_ctx, 0);

	return 0;
}
|
|
|
|
|
2018-08-02 22:08:12 +00:00
|
|
|
int
|
|
|
|
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
nvmf: initialize trid param in get_***_trid paths
When removing a listener, for example with
nvmf_subsystem_remove_listener RPC, we use the concept of a
"listen trid" to determine which existing connections
should be disconnected.
This listen trid has the trtype, adrfam, traddr and trsvcid
defined, but *not* the subnqn. We use the subsystem pointer
itself to match the subsystem.
nvmf_stop_listen_disconnect_qpairs gets the listen trid
for each qpair, compares it to the trid passed by the
RPC, and if it matches, then it compares the subsystem
pointers and will disconnect the qpair if it matches.
The problem is that the spdk_nvmf_qpair_get_listen_trid
path does not initialize the subnqn to an empty string,
and in this case the caller does not initialize it either.
So sometimes the subnqn on the stack used to get the
qpair's listen trid ends up with some garbage as the subnqn
string, which causes the transport_id_compare to fail, and
then the qpair won't get disconnected even if the other
trid fields and subsystem pointers match.
For the failover.sh test, this means that the qpair doesn't
get disconnected, so we never go down the reset path
on the initiator side and don't see the "Resetting" strings
expected in the log.
This similarly impacts the host/timeout.sh test, which is
also fixed by this patch. There were multiple failing
signatures, all related to remove_listener not working
correctly due to this bug.
While the get_listen_trid path is the one that caused
these bugs, the get_local_trid and get_peer_trid paths
have similar problems, so they are similarly fixed in
this patch.
Fixes issue #2862.
Fixes issue #2595.
Fixes issue #2865.
Fixes issue #2864.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I36eb519cd1f434d50eebf724ecd6dbc2528288c3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17788
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Mike Gerdts <mgerdts@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: <sebastian.brzezinka@intel.com>
2023-04-26 13:46:29 +00:00
|
|
|
memset(trid, 0, sizeof(*trid));
|
2020-05-15 01:14:26 +00:00
|
|
|
return nvmf_transport_qpair_get_peer_trid(qpair, trid);
|
2018-08-02 22:08:12 +00:00
|
|
|
}
|
|
|
|
|
2018-09-10 21:28:04 +00:00
|
|
|
int
|
|
|
|
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
nvmf: initialize trid param in get_***_trid paths
When removing a listener, for example with
nvmf_subsystem_remove_listener RPC, we use the concept of a
"listen trid" to determine which existing connections
should be disconnected.
This listen trid has the trtype, adrfam, traddr and trsvcid
defined, but *not* the subnqn. We use the subsystem pointer
itself to match the subsystem.
nvmf_stop_listen_disconnect_qpairs gets the listen trid
for each qpair, compares it to the trid passed by the
RPC, and if it matches, then it compares the subsystem
pointers and will disconnect the qpair if it matches.
The problem is that the spdk_nvmf_qpair_get_listen_trid
path does not initialize the subnqn to an empty string,
and in this case the caller does not initialize it either.
So sometimes the subnqn on the stack used to get the
qpair's listen trid ends up with some garbage as the subnqn
string, which causes the transport_id_compare to fail, and
then the qpair won't get disconnected even if the other
trid fields and subsystem pointers match.
For the failover.sh test, this means that the qpair doesn't
get disconnected, so we never go down the reset path
on the initiator side and don't see the "Resetting" strings
expected in the log.
This similarly impacts the host/timeout.sh test, which is
also fixed by this patch. There were multiple failing
signatures, all related to remove_listener not working
correctly due to this bug.
While the get_listen_trid path is the one that caused
these bugs, the get_local_trid and get_peer_trid paths
have similar problems, so they are similarly fixed in
this patch.
Fixes issue #2862.
Fixes issue #2595.
Fixes issue #2865.
Fixes issue #2864.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I36eb519cd1f434d50eebf724ecd6dbc2528288c3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17788
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Mike Gerdts <mgerdts@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: <sebastian.brzezinka@intel.com>
2023-04-26 13:46:29 +00:00
|
|
|
memset(trid, 0, sizeof(*trid));
|
2020-05-15 01:14:26 +00:00
|
|
|
return nvmf_transport_qpair_get_local_trid(qpair, trid);
|
2018-09-10 21:28:04 +00:00
|
|
|
}
|
|
|
|
|
2018-09-07 20:41:41 +00:00
|
|
|
int
|
|
|
|
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
nvmf: initialize trid param in get_***_trid paths
When removing a listener, for example with
nvmf_subsystem_remove_listener RPC, we use the concept of a
"listen trid" to determine which existing connections
should be disconnected.
This listen trid has the trtype, adrfam, traddr and trsvcid
defined, but *not* the subnqn. We use the subsystem pointer
itself to match the subsystem.
nvmf_stop_listen_disconnect_qpairs gets the listen trid
for each qpair, compares it to the trid passed by the
RPC, and if it matches, then it compares the subsystem
pointers and will disconnect the qpair if it matches.
The problem is that the spdk_nvmf_qpair_get_listen_trid
path does not initialize the subnqn to an empty string,
and in this case the caller does not initialize it either.
So sometimes the subnqn on the stack used to get the
qpair's listen trid ends up with some garbage as the subnqn
string, which causes the transport_id_compare to fail, and
then the qpair won't get disconnected even if the other
trid fields and subsystem pointers match.
For the failover.sh test, this means that the qpair doesn't
get disconnected, so we never go down the reset path
on the initiator side and don't see the "Resetting" strings
expected in the log.
This similarly impacts the host/timeout.sh test, which is
also fixed by this patch. There were multiple failing
signatures, all related to remove_listener not working
correctly due to this bug.
While the get_listen_trid path is the one that caused
these bugs, the get_local_trid and get_peer_trid paths
have similar problems, so they are similarly fixed in
this patch.
Fixes issue #2862.
Fixes issue #2595.
Fixes issue #2865.
Fixes issue #2864.
Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I36eb519cd1f434d50eebf724ecd6dbc2528288c3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17788
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Mike Gerdts <mgerdts@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: <sebastian.brzezinka@intel.com>
2023-04-26 13:46:29 +00:00
|
|
|
memset(trid, 0, sizeof(*trid));
|
2020-05-15 01:14:26 +00:00
|
|
|
return nvmf_transport_qpair_get_listen_trid(qpair, trid);
|
2018-09-07 20:41:41 +00:00
|
|
|
}
|
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
static int
|
|
|
|
poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_subsystem *subsystem)
|
2017-11-20 16:50:10 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup;
|
2019-03-01 04:21:37 +00:00
|
|
|
uint32_t new_num_ns, old_num_ns;
|
2019-03-15 02:40:19 +00:00
|
|
|
uint32_t i, j;
|
2017-12-19 23:39:04 +00:00
|
|
|
struct spdk_nvmf_ns *ns;
|
2019-03-15 02:40:19 +00:00
|
|
|
struct spdk_nvmf_registrant *reg, *tmp;
|
2019-06-28 08:50:02 +00:00
|
|
|
struct spdk_io_channel *ch;
|
2019-06-28 09:35:05 +00:00
|
|
|
struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
|
2019-08-08 21:15:17 +00:00
|
|
|
struct spdk_nvmf_ctrlr *ctrlr;
|
|
|
|
bool ns_changed;
|
2017-11-20 16:50:10 +00:00
|
|
|
|
2018-04-05 20:51:51 +00:00
|
|
|
/* Make sure our poll group has memory for this subsystem allocated */
|
2017-11-20 16:50:10 +00:00
|
|
|
if (subsystem->id >= group->num_sgroups) {
|
2018-05-08 23:05:28 +00:00
|
|
|
return -ENOMEM;
|
2017-11-20 16:50:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
sgroup = &group->sgroups[subsystem->id];
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
/* Make sure the array of namespace information is the correct size */
|
|
|
|
new_num_ns = subsystem->max_nsid;
|
|
|
|
old_num_ns = sgroup->num_ns;
|
2017-12-19 23:39:04 +00:00
|
|
|
|
2019-08-08 21:15:17 +00:00
|
|
|
ns_changed = false;
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
if (old_num_ns == 0) {
|
|
|
|
if (new_num_ns > 0) {
|
2018-04-05 20:51:51 +00:00
|
|
|
/* First allocation */
|
2019-03-01 04:21:37 +00:00
|
|
|
sgroup->ns_info = calloc(new_num_ns, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
|
|
|
|
if (!sgroup->ns_info) {
|
2018-04-05 20:51:51 +00:00
|
|
|
return -ENOMEM;
|
2018-03-21 06:16:23 +00:00
|
|
|
}
|
|
|
|
}
|
2019-03-01 04:21:37 +00:00
|
|
|
} else if (new_num_ns > old_num_ns) {
|
2018-04-05 20:51:51 +00:00
|
|
|
void *buf;
|
|
|
|
|
|
|
|
/* Make the array larger */
|
2019-03-01 04:21:37 +00:00
|
|
|
buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
|
2018-04-05 20:51:51 +00:00
|
|
|
if (!buf) {
|
2017-12-19 23:39:04 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
sgroup->ns_info = buf;
|
2018-04-05 20:51:51 +00:00
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
/* Null out the new namespace information slots */
|
|
|
|
for (i = old_num_ns; i < new_num_ns; i++) {
|
|
|
|
memset(&sgroup->ns_info[i], 0, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
2019-03-01 04:21:37 +00:00
|
|
|
} else if (new_num_ns < old_num_ns) {
|
2018-04-05 20:51:51 +00:00
|
|
|
void *buf;
|
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
/* Free the extra I/O channels */
|
2019-03-01 04:21:37 +00:00
|
|
|
for (i = new_num_ns; i < old_num_ns; i++) {
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info = &sgroup->ns_info[i];
|
|
|
|
|
|
|
|
if (ns_info->channel) {
|
|
|
|
spdk_put_io_channel(ns_info->channel);
|
|
|
|
ns_info->channel = NULL;
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
|
|
|
}
|
2017-11-20 16:50:10 +00:00
|
|
|
|
2018-04-05 20:51:51 +00:00
|
|
|
/* Make the array smaller */
|
2019-03-01 04:21:37 +00:00
|
|
|
if (new_num_ns > 0) {
|
|
|
|
buf = realloc(sgroup->ns_info, new_num_ns * sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
|
2018-04-05 20:51:51 +00:00
|
|
|
if (!buf) {
|
|
|
|
return -ENOMEM;
|
2017-11-20 16:50:10 +00:00
|
|
|
}
|
2019-03-01 04:21:37 +00:00
|
|
|
sgroup->ns_info = buf;
|
2018-04-05 20:51:51 +00:00
|
|
|
} else {
|
2019-03-01 04:21:37 +00:00
|
|
|
free(sgroup->ns_info);
|
|
|
|
sgroup->ns_info = NULL;
|
2017-11-20 16:50:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
sgroup->num_ns = new_num_ns;
|
2018-04-05 20:51:51 +00:00
|
|
|
|
|
|
|
/* Detect bdevs that were added or removed */
|
2019-03-01 04:21:37 +00:00
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
2018-04-05 20:51:51 +00:00
|
|
|
ns = subsystem->ns[i];
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info = &sgroup->ns_info[i];
|
|
|
|
ch = ns_info->channel;
|
2019-06-28 08:50:02 +00:00
|
|
|
|
|
|
|
if (ns == NULL && ch == NULL) {
|
2018-04-05 20:51:51 +00:00
|
|
|
/* Both NULL. Leave empty */
|
2019-06-28 08:50:02 +00:00
|
|
|
} else if (ns == NULL && ch != NULL) {
|
2018-04-05 20:51:51 +00:00
|
|
|
/* There was a channel here, but the namespace is gone. */
|
2019-08-08 21:15:17 +00:00
|
|
|
ns_changed = true;
|
2019-06-28 08:50:02 +00:00
|
|
|
spdk_put_io_channel(ch);
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info->channel = NULL;
|
2019-06-28 08:50:02 +00:00
|
|
|
} else if (ns != NULL && ch == NULL) {
|
2018-04-05 20:51:51 +00:00
|
|
|
/* A namespace appeared but there is no channel yet */
|
2019-08-08 21:15:17 +00:00
|
|
|
ns_changed = true;
|
2019-06-28 09:35:05 +00:00
|
|
|
ch = spdk_bdev_get_io_channel(ns->desc);
|
|
|
|
if (ch == NULL) {
|
2018-06-25 03:02:16 +00:00
|
|
|
SPDK_ERRLOG("Could not allocate I/O channel.\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info->channel = ch;
|
2019-06-27 11:24:12 +00:00
|
|
|
} else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
|
|
|
|
/* A namespace was here before, but was replaced by a new one. */
|
2019-08-08 21:15:17 +00:00
|
|
|
ns_changed = true;
|
2019-06-27 11:24:12 +00:00
|
|
|
spdk_put_io_channel(ns_info->channel);
|
|
|
|
memset(ns_info, 0, sizeof(*ns_info));
|
|
|
|
|
|
|
|
ch = spdk_bdev_get_io_channel(ns->desc);
|
|
|
|
if (ch == NULL) {
|
|
|
|
SPDK_ERRLOG("Could not allocate I/O channel.\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ns_info->channel = ch;
|
2019-09-11 14:47:39 +00:00
|
|
|
} else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) {
|
|
|
|
/* Namespace is still there but size has changed */
|
2020-11-17 16:49:41 +00:00
|
|
|
SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u,"
|
|
|
|
" nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n",
|
2019-09-11 14:47:39 +00:00
|
|
|
subsystem->id,
|
|
|
|
ns->nsid,
|
|
|
|
group,
|
|
|
|
ns_info->num_blocks,
|
|
|
|
spdk_bdev_get_num_blocks(ns->bdev));
|
|
|
|
ns_changed = true;
|
2018-04-05 20:51:51 +00:00
|
|
|
}
|
2019-03-01 05:12:55 +00:00
|
|
|
|
|
|
|
if (ns == NULL) {
|
2019-06-28 09:35:05 +00:00
|
|
|
memset(ns_info, 0, sizeof(*ns_info));
|
2019-04-10 06:48:53 +00:00
|
|
|
} else {
|
2019-06-27 11:24:12 +00:00
|
|
|
ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
|
2019-09-11 14:47:39 +00:00
|
|
|
ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev);
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info->crkey = ns->crkey;
|
|
|
|
ns_info->rtype = ns->rtype;
|
2019-04-10 06:48:53 +00:00
|
|
|
if (ns->holder) {
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info->holder_id = ns->holder->hostid;
|
2019-04-10 06:48:53 +00:00
|
|
|
}
|
2019-03-15 02:40:19 +00:00
|
|
|
|
2019-06-28 09:35:05 +00:00
|
|
|
memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS * sizeof(struct spdk_uuid));
|
2019-03-15 02:40:19 +00:00
|
|
|
j = 0;
|
|
|
|
TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
|
|
|
|
if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS) {
|
|
|
|
SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-06-28 09:35:05 +00:00
|
|
|
ns_info->reg_hostid[j++] = reg->hostid;
|
2019-03-15 02:40:19 +00:00
|
|
|
}
|
2019-03-01 05:12:55 +00:00
|
|
|
}
|
2018-04-05 20:51:51 +00:00
|
|
|
}
|
2017-12-19 23:39:04 +00:00
|
|
|
|
2019-08-08 21:15:17 +00:00
|
|
|
if (ns_changed) {
|
|
|
|
TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
|
2021-06-30 20:24:45 +00:00
|
|
|
/* It is possible that a ctrlr was added but the admin_qpair hasn't been
|
|
|
|
* assigned yet.
|
|
|
|
*/
|
|
|
|
if (!ctrlr->admin_qpair) {
|
|
|
|
continue;
|
|
|
|
}
|
2019-08-08 21:15:17 +00:00
|
|
|
if (ctrlr->admin_qpair->group == group) {
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_ctrlr_async_event_ns_notice(ctrlr);
|
2020-11-13 08:13:30 +00:00
|
|
|
nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
|
2019-08-08 21:15:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-20 16:50:10 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-18 16:31:40 +00:00
|
|
|
/* Internal (library-visible) entry point that re-syncs a poll group's view of a
 * subsystem's namespaces. Thin wrapper around poll_group_update_subsystem();
 * returns its result (0 on success, negative errno on failure).
 */
int
nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}
|
|
|
|
|
2018-08-28 22:41:16 +00:00
|
|
|
int
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_subsystem *subsystem,
|
|
|
|
spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
|
2017-12-14 02:38:18 +00:00
|
|
|
{
|
2018-07-13 23:23:23 +00:00
|
|
|
int rc = 0;
|
2018-08-20 22:48:27 +00:00
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];
|
2020-11-02 17:12:11 +00:00
|
|
|
uint32_t i;
|
2018-08-20 22:48:27 +00:00
|
|
|
|
|
|
|
TAILQ_INIT(&sgroup->queued);
|
2017-12-14 02:38:18 +00:00
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
rc = poll_group_update_subsystem(group, subsystem);
|
|
|
|
if (rc) {
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL);
|
2018-07-13 23:23:23 +00:00
|
|
|
goto fini;
|
2017-12-14 02:38:18 +00:00
|
|
|
}
|
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
|
2020-11-02 17:12:11 +00:00
|
|
|
|
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
|
|
|
sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
|
|
|
|
}
|
|
|
|
|
2018-07-13 23:23:23 +00:00
|
|
|
fini:
|
|
|
|
if (cb_fn) {
|
|
|
|
cb_fn(cb_arg, rc);
|
|
|
|
}
|
2018-08-28 22:41:16 +00:00
|
|
|
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_subsystem, spdk_thread_get_id(group->thread),
|
|
|
|
subsystem->subnqn);
|
2021-11-15 11:25:38 +00:00
|
|
|
|
2018-08-28 22:41:16 +00:00
|
|
|
return rc;
|
2017-12-14 02:38:18 +00:00
|
|
|
}
|
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
static void
|
|
|
|
_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
|
2017-11-20 16:50:10 +00:00
|
|
|
{
|
2018-07-13 23:23:23 +00:00
|
|
|
struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
|
|
|
|
struct spdk_nvmf_subsystem *subsystem;
|
|
|
|
struct spdk_nvmf_poll_group *group;
|
2017-11-20 16:50:10 +00:00
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup;
|
2018-07-13 23:23:23 +00:00
|
|
|
spdk_nvmf_poll_group_mod_done cpl_fn = NULL;
|
|
|
|
void *cpl_ctx = NULL;
|
2017-11-20 16:50:10 +00:00
|
|
|
uint32_t nsid;
|
|
|
|
|
2018-07-13 23:23:23 +00:00
|
|
|
group = qpair_ctx->group;
|
|
|
|
subsystem = qpair_ctx->subsystem;
|
|
|
|
cpl_fn = qpair_ctx->cpl_fn;
|
|
|
|
cpl_ctx = qpair_ctx->cpl_ctx;
|
|
|
|
sgroup = &group->sgroups[subsystem->id];
|
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
if (status) {
|
2018-07-13 23:23:23 +00:00
|
|
|
goto fini;
|
2018-06-01 22:10:12 +00:00
|
|
|
}
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
|
|
|
|
if (sgroup->ns_info[nsid].channel) {
|
|
|
|
spdk_put_io_channel(sgroup->ns_info[nsid].channel);
|
|
|
|
sgroup->ns_info[nsid].channel = NULL;
|
2017-11-20 16:50:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-01 04:21:37 +00:00
|
|
|
sgroup->num_ns = 0;
|
|
|
|
free(sgroup->ns_info);
|
|
|
|
sgroup->ns_info = NULL;
|
2018-07-13 23:23:23 +00:00
|
|
|
fini:
|
|
|
|
free(qpair_ctx);
|
|
|
|
if (cpl_fn) {
|
|
|
|
cpl_fn(cpl_ctx, status);
|
|
|
|
}
|
2018-06-29 20:15:15 +00:00
|
|
|
}
|
|
|
|
|
2021-05-13 15:31:54 +00:00
|
|
|
static void nvmf_poll_group_remove_subsystem_msg(void *ctx);
|
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
static void
|
2021-02-26 15:27:23 +00:00
|
|
|
nvmf_poll_group_remove_subsystem_msg(void *ctx)
|
2018-06-29 20:15:15 +00:00
|
|
|
{
|
2021-02-26 15:27:23 +00:00
|
|
|
struct spdk_nvmf_qpair *qpair, *qpair_tmp;
|
2018-06-29 20:15:15 +00:00
|
|
|
struct spdk_nvmf_subsystem *subsystem;
|
|
|
|
struct spdk_nvmf_poll_group *group;
|
2021-02-26 15:27:23 +00:00
|
|
|
struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
|
2021-05-13 15:31:54 +00:00
|
|
|
bool qpairs_found = false;
|
2018-06-29 20:15:15 +00:00
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
group = qpair_ctx->group;
|
|
|
|
subsystem = qpair_ctx->subsystem;
|
|
|
|
|
2021-02-26 15:27:23 +00:00
|
|
|
TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
|
2018-12-12 17:39:44 +00:00
|
|
|
if ((qpair->ctrlr != NULL) && (qpair->ctrlr->subsys == subsystem)) {
|
2021-05-13 15:31:54 +00:00
|
|
|
qpairs_found = true;
|
2023-03-13 08:10:32 +00:00
|
|
|
rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
|
2023-03-13 08:28:25 +00:00
|
|
|
if (rc && rc != -EINPROGRESS) {
|
2021-02-26 15:27:23 +00:00
|
|
|
break;
|
|
|
|
}
|
2018-06-29 20:15:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-13 15:31:54 +00:00
|
|
|
if (!qpairs_found) {
|
|
|
|
_nvmf_poll_group_remove_subsystem_cb(ctx, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-03-13 08:10:32 +00:00
|
|
|
/* Some qpairs are in process of being disconnected. Send a message and try to remove them again */
|
|
|
|
spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx);
|
2018-06-29 20:15:15 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 23:23:23 +00:00
|
|
|
void
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_subsystem *subsystem,
|
|
|
|
spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
|
2018-06-29 20:15:15 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup;
|
|
|
|
struct nvmf_qpair_disconnect_many_ctx *ctx;
|
2020-11-02 17:12:11 +00:00
|
|
|
uint32_t i;
|
2018-06-29 20:15:15 +00:00
|
|
|
|
2023-04-21 20:36:10 +00:00
|
|
|
SPDK_DTRACE_PROBE3_TICKS(nvmf_poll_group_remove_subsystem, group, spdk_thread_get_id(group->thread),
|
|
|
|
subsystem->subnqn);
|
2021-11-15 11:25:38 +00:00
|
|
|
|
2018-06-29 20:15:15 +00:00
|
|
|
ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
|
|
|
|
if (!ctx) {
|
2018-07-13 23:23:23 +00:00
|
|
|
SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n");
|
2021-02-26 15:27:23 +00:00
|
|
|
if (cb_fn) {
|
|
|
|
cb_fn(cb_arg, -1);
|
|
|
|
}
|
|
|
|
return;
|
2018-06-29 20:15:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ctx->group = group;
|
|
|
|
ctx->subsystem = subsystem;
|
2018-07-13 23:23:23 +00:00
|
|
|
ctx->cpl_fn = cb_fn;
|
|
|
|
ctx->cpl_ctx = cb_arg;
|
2018-06-29 20:15:15 +00:00
|
|
|
|
|
|
|
sgroup = &group->sgroups[subsystem->id];
|
|
|
|
sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
|
|
|
|
|
2020-11-02 17:12:11 +00:00
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
|
|
|
sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
|
|
|
|
}
|
|
|
|
|
2021-02-26 15:27:23 +00:00
|
|
|
nvmf_poll_group_remove_subsystem_msg(ctx);
|
2017-11-20 16:50:10 +00:00
|
|
|
}
|
|
|
|
|
2018-07-13 23:23:23 +00:00
|
|
|
void
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_subsystem *subsystem,
|
2020-11-02 17:12:11 +00:00
|
|
|
uint32_t nsid,
|
2020-05-09 22:57:13 +00:00
|
|
|
spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
|
2017-11-20 17:45:39 +00:00
|
|
|
{
|
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup;
|
2020-11-02 17:12:11 +00:00
|
|
|
struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL;
|
2018-07-13 23:23:23 +00:00
|
|
|
int rc = 0;
|
2022-06-21 21:08:00 +00:00
|
|
|
uint32_t i;
|
2017-11-20 17:45:39 +00:00
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
if (subsystem->id >= group->num_sgroups) {
|
2018-07-13 23:23:23 +00:00
|
|
|
rc = -1;
|
|
|
|
goto fini;
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
2017-11-20 17:45:39 +00:00
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
sgroup = &group->sgroups[subsystem->id];
|
2020-07-17 00:52:27 +00:00
|
|
|
if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
|
|
|
|
goto fini;
|
|
|
|
}
|
2019-04-25 14:03:06 +00:00
|
|
|
sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
|
|
|
|
|
2022-06-21 21:08:00 +00:00
|
|
|
if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
|
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
|
|
|
ns_info = &sgroup->ns_info[i];
|
|
|
|
ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
|
|
|
|
if (nsid - 1 < sgroup->num_ns) {
|
|
|
|
ns_info = &sgroup->ns_info[nsid - 1];
|
|
|
|
ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
|
|
|
|
}
|
2020-11-02 17:12:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sgroup->mgmt_io_outstanding > 0) {
|
2020-12-11 00:19:52 +00:00
|
|
|
assert(sgroup->cb_fn == NULL);
|
2019-04-25 14:03:06 +00:00
|
|
|
sgroup->cb_fn = cb_fn;
|
2020-12-11 00:19:52 +00:00
|
|
|
assert(sgroup->cb_arg == NULL);
|
2019-04-25 14:03:06 +00:00
|
|
|
sgroup->cb_arg = cb_arg;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-06-21 21:08:00 +00:00
|
|
|
if (nsid == SPDK_NVME_GLOBAL_NS_TAG) {
|
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
|
|
|
ns_info = &sgroup->ns_info[i];
|
|
|
|
|
|
|
|
if (ns_info->io_outstanding > 0) {
|
|
|
|
assert(sgroup->cb_fn == NULL);
|
|
|
|
sgroup->cb_fn = cb_fn;
|
|
|
|
assert(sgroup->cb_arg == NULL);
|
|
|
|
sgroup->cb_arg = cb_arg;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (ns_info != NULL && ns_info->io_outstanding > 0) {
|
|
|
|
assert(sgroup->cb_fn == NULL);
|
|
|
|
sgroup->cb_fn = cb_fn;
|
|
|
|
assert(sgroup->cb_arg == NULL);
|
|
|
|
sgroup->cb_arg = cb_arg;
|
|
|
|
return;
|
|
|
|
}
|
2020-11-02 17:12:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
assert(sgroup->mgmt_io_outstanding == 0);
|
2017-12-19 23:39:04 +00:00
|
|
|
sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
|
2018-07-13 23:23:23 +00:00
|
|
|
fini:
|
|
|
|
if (cb_fn) {
|
|
|
|
cb_fn(cb_arg, rc);
|
|
|
|
}
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
2017-11-20 17:45:39 +00:00
|
|
|
|
2018-07-13 23:23:23 +00:00
|
|
|
void
|
2020-05-09 22:57:13 +00:00
|
|
|
nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
|
|
|
|
struct spdk_nvmf_subsystem *subsystem,
|
|
|
|
spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
|
2017-12-19 23:39:04 +00:00
|
|
|
{
|
2018-04-04 16:16:39 +00:00
|
|
|
struct spdk_nvmf_request *req, *tmp;
|
|
|
|
struct spdk_nvmf_subsystem_poll_group *sgroup;
|
2018-07-13 23:23:23 +00:00
|
|
|
int rc = 0;
|
2020-11-02 17:12:11 +00:00
|
|
|
uint32_t i;
|
2017-11-20 17:45:39 +00:00
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
if (subsystem->id >= group->num_sgroups) {
|
2018-07-13 23:23:23 +00:00
|
|
|
rc = -1;
|
|
|
|
goto fini;
|
2017-11-20 17:45:39 +00:00
|
|
|
}
|
|
|
|
|
2018-04-04 16:16:39 +00:00
|
|
|
sgroup = &group->sgroups[subsystem->id];
|
|
|
|
|
2020-07-17 00:52:27 +00:00
|
|
|
if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
|
|
|
|
goto fini;
|
|
|
|
}
|
2017-12-19 23:39:04 +00:00
|
|
|
|
|
|
|
rc = poll_group_update_subsystem(group, subsystem);
|
|
|
|
if (rc) {
|
2018-07-13 23:23:23 +00:00
|
|
|
goto fini;
|
2017-11-20 17:45:39 +00:00
|
|
|
}
|
|
|
|
|
2020-11-02 17:12:11 +00:00
|
|
|
for (i = 0; i < sgroup->num_ns; i++) {
|
|
|
|
sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
|
|
|
|
}
|
|
|
|
|
2018-04-04 16:16:39 +00:00
|
|
|
sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
|
|
|
|
|
|
|
|
/* Release all queued requests */
|
|
|
|
TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) {
|
|
|
|
TAILQ_REMOVE(&sgroup->queued, req, link);
|
2021-12-10 14:42:54 +00:00
|
|
|
if (spdk_nvmf_request_using_zcopy(req)) {
|
|
|
|
spdk_nvmf_request_zcopy_start(req);
|
|
|
|
} else {
|
|
|
|
spdk_nvmf_request_exec(req);
|
|
|
|
}
|
|
|
|
|
2018-04-04 16:16:39 +00:00
|
|
|
}
|
2018-07-13 23:23:23 +00:00
|
|
|
fini:
|
|
|
|
if (cb_fn) {
|
|
|
|
cb_fn(cb_arg, rc);
|
|
|
|
}
|
2017-11-20 17:45:39 +00:00
|
|
|
}
|
2019-05-15 12:40:12 +00:00
|
|
|
|
|
|
|
|
|
|
|
struct spdk_nvmf_poll_group *
|
|
|
|
spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_transport_poll_group *tgroup;
|
|
|
|
|
2020-05-15 01:14:26 +00:00
|
|
|
tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);
|
2019-05-15 12:40:12 +00:00
|
|
|
|
|
|
|
if (tgroup == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return tgroup->group;
|
|
|
|
}
|
2019-04-15 09:54:38 +00:00
|
|
|
|
2021-03-03 15:36:02 +00:00
|
|
|
void
|
|
|
|
spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w)
|
|
|
|
{
|
|
|
|
struct spdk_nvmf_transport_poll_group *tgroup;
|
|
|
|
|
|
|
|
spdk_json_write_object_begin(w);
|
|
|
|
|
|
|
|
spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread()));
|
|
|
|
spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs);
|
|
|
|
spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs);
|
2021-05-20 01:44:00 +00:00
|
|
|
spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs);
|
|
|
|
spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs);
|
2021-03-03 15:36:02 +00:00
|
|
|
spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io);
|
2022-12-13 23:13:12 +00:00
|
|
|
spdk_json_write_named_uint64(w, "completed_nvme_io", group->stat.completed_nvme_io);
|
2021-03-03 15:36:02 +00:00
|
|
|
|
|
|
|
spdk_json_write_named_array_begin(w, "transports");
|
|
|
|
|
|
|
|
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
|
|
|
|
spdk_json_write_object_begin(w);
|
|
|
|
/*
|
|
|
|
* The trtype field intentionally contains a transport name as this is more informative.
|
|
|
|
* The field has not been renamed for backward compatibility.
|
|
|
|
*/
|
|
|
|
spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport));
|
|
|
|
|
|
|
|
if (tgroup->transport->ops->poll_group_dump_stat) {
|
|
|
|
tgroup->transport->ops->poll_group_dump_stat(tgroup, w);
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_json_write_object_end(w);
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_json_write_array_end(w);
|
|
|
|
spdk_json_write_object_end(w);
|
|
|
|
}
|