2016-06-06 21:44:30 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
|
|
|
* Copyright (c) Intel Corporation.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2018-03-08 20:26:44 +00:00
|
|
|
#include "event_nvmf.h"
|
2016-08-15 23:25:11 +00:00
|
|
|
|
2016-09-19 17:01:52 +00:00
|
|
|
#include "spdk/bdev.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
#include "spdk/event.h"
|
2018-06-11 20:32:15 +00:00
|
|
|
#include "spdk/thread.h"
|
2016-06-06 21:44:30 +00:00
|
|
|
#include "spdk/log.h"
|
|
|
|
#include "spdk/nvme.h"
|
2020-01-31 17:13:43 +00:00
|
|
|
#include "spdk/nvmf_cmd.h"
|
2017-09-25 22:27:01 +00:00
|
|
|
#include "spdk/util.h"
|
|
|
|
|
2018-03-10 00:28:52 +00:00
|
|
|
/*
 * Lifecycle states for the NVMe-oF target event subsystem. States advance
 * monotonically through init (NONE -> ... -> RUNNING) and then through
 * teardown (FINI_* -> STOPPED); nvmf_tgt_advance_state() drives transitions.
 */
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_PARSE_CONFIG,		/* parsing configuration (async) */
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,	/* spawning one poll group per core */
	NVMF_TGT_INIT_START_SUBSYSTEMS,		/* starting each NVMe-oF subsystem in turn */
	NVMF_TGT_INIT_START_ACCEPTOR,		/* registering the connection acceptor poller */
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,		/* stopping each NVMe-oF subsystem in turn */
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,	/* tearing down poll groups and their threads */
	NVMF_TGT_FINI_STOP_ACCEPTOR,		/* unregistering the acceptor poller */
	NVMF_TGT_FINI_FREE_RESOURCES,		/* destroying the target object */
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,				/* config parse failed; skip straight to resource free */
};
|
|
|
|
|
2017-09-25 22:27:01 +00:00
|
|
|
/*
 * Per-core poll group bookkeeping: the NVMe-oF poll group itself plus the
 * SPDK thread it lives on (messages targeting the group are sent to that
 * thread). Linked into g_poll_groups, owned by the init/fini thread.
 */
struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group		*group;
	struct spdk_thread			*thread;
	TAILQ_ENTRY(nvmf_tgt_poll_group)	link;
};
|
2016-06-06 21:44:30 +00:00
|
|
|
|
2018-03-10 00:33:41 +00:00
|
|
|
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;
|
2017-08-18 22:38:33 +00:00
|
|
|
|
2018-03-10 00:28:52 +00:00
|
|
|
static enum nvmf_tgt_state g_tgt_state;
|
|
|
|
|
2020-02-03 01:53:46 +00:00
|
|
|
static struct spdk_thread *g_tgt_init_thread = NULL;
|
2020-02-03 01:10:34 +00:00
|
|
|
static struct spdk_thread *g_tgt_fini_thread = NULL;
|
|
|
|
|
2019-04-22 21:09:51 +00:00
|
|
|
static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
|
2017-09-25 22:27:01 +00:00
|
|
|
static size_t g_num_poll_groups = 0;
|
|
|
|
|
2016-08-16 16:35:59 +00:00
|
|
|
static struct spdk_poller *g_acceptor_poller = NULL;
|
|
|
|
|
2018-03-08 20:26:44 +00:00
|
|
|
static void nvmf_tgt_advance_state(void);
|
2016-08-25 22:00:50 +00:00
|
|
|
|
2016-08-16 16:35:59 +00:00
|
|
|
static void
|
2020-05-10 19:34:39 +00:00
|
|
|
nvmf_shutdown_cb(void *arg1)
|
2016-08-16 16:35:59 +00:00
|
|
|
{
|
nvmf/tgt: Fix issues for ctrlr+ c handling.
When receving ctrlr+c event, NVMe-oF target
could in any state. So we cannot guarantee
g_acceptor_poller is initialized or not. If
we do not handle such case, ctrlr+c will
trigger unexpected coredump issue. To
solve this issue, following methods are used.
Currently, our code in event module (lib/event/app.c)
can only receive ctrlr + c command once, so
when we receive the ctrlr+c, we should complete
the shutdown process.
The idea is to use spdk_event_call, we will only
enter shutdown process if tgt is in NVMF_TGT_RUNNING
status.
After several patch tries, I think that this solution
is much simple. Though we would like to kill after
entering the running state, it may wait some time
if users kill the application in early state, but those
operations will not be quite often in real case.
Change-Id: Id89a96b5d39f8a528e72dea8c0eb6524bdaf7ee4
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/389433
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
2017-12-08 10:57:43 +00:00
|
|
|
/* Still in initialization state, defer shutdown operation */
|
2018-03-10 00:28:52 +00:00
|
|
|
if (g_tgt_state < NVMF_TGT_RUNNING) {
|
2020-05-10 19:34:39 +00:00
|
|
|
spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb, NULL);
|
nvmf/tgt: Fix issues for ctrlr+ c handling.
When receving ctrlr+c event, NVMe-oF target
could in any state. So we cannot guarantee
g_acceptor_poller is initialized or not. If
we do not handle such case, ctrlr+c will
trigger unexpected coredump issue. To
solve this issue, following methods are used.
Currently, our code in event module (lib/event/app.c)
can only receive ctrlr + c command once, so
when we receive the ctrlr+c, we should complete
the shutdown process.
The idea is to use spdk_event_call, we will only
enter shutdown process if tgt is in NVMF_TGT_RUNNING
status.
After several patch tries, I think that this solution
is much simple. Though we would like to kill after
entering the running state, it may wait some time
if users kill the application in early state, but those
operations will not be quite often in real case.
Change-Id: Id89a96b5d39f8a528e72dea8c0eb6524bdaf7ee4
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/389433
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
2017-12-08 10:57:43 +00:00
|
|
|
return;
|
2019-12-06 03:03:02 +00:00
|
|
|
} else if (g_tgt_state != NVMF_TGT_RUNNING && g_tgt_state != NVMF_TGT_ERROR) {
|
nvmf/tgt: Fix issues for ctrlr+ c handling.
When receving ctrlr+c event, NVMe-oF target
could in any state. So we cannot guarantee
g_acceptor_poller is initialized or not. If
we do not handle such case, ctrlr+c will
trigger unexpected coredump issue. To
solve this issue, following methods are used.
Currently, our code in event module (lib/event/app.c)
can only receive ctrlr + c command once, so
when we receive the ctrlr+c, we should complete
the shutdown process.
The idea is to use spdk_event_call, we will only
enter shutdown process if tgt is in NVMF_TGT_RUNNING
status.
After several patch tries, I think that this solution
is much simple. Though we would like to kill after
entering the running state, it may wait some time
if users kill the application in early state, but those
operations will not be quite often in real case.
Change-Id: Id89a96b5d39f8a528e72dea8c0eb6524bdaf7ee4
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/389433
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
2017-12-08 10:57:43 +00:00
|
|
|
/* Already in Shutdown status, ignore the signal */
|
|
|
|
return;
|
|
|
|
}
|
2016-08-16 16:35:59 +00:00
|
|
|
|
2019-12-06 03:03:02 +00:00
|
|
|
if (g_tgt_state == NVMF_TGT_ERROR) {
|
|
|
|
/* Parse configuration error */
|
|
|
|
g_tgt_state = NVMF_TGT_FINI_FREE_RESOURCES;
|
|
|
|
} else {
|
|
|
|
g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
|
|
|
|
}
|
2018-03-08 20:26:44 +00:00
|
|
|
nvmf_tgt_advance_state();
|
2016-06-06 21:44:30 +00:00
|
|
|
}
|
|
|
|
|
nvmf/tgt: Fix issues for ctrlr+ c handling.
When receving ctrlr+c event, NVMe-oF target
could in any state. So we cannot guarantee
g_acceptor_poller is initialized or not. If
we do not handle such case, ctrlr+c will
trigger unexpected coredump issue. To
solve this issue, following methods are used.
Currently, our code in event module (lib/event/app.c)
can only receive ctrlr + c command once, so
when we receive the ctrlr+c, we should complete
the shutdown process.
The idea is to use spdk_event_call, we will only
enter shutdown process if tgt is in NVMF_TGT_RUNNING
status.
After several patch tries, I think that this solution
is much simple. Though we would like to kill after
entering the running state, it may wait some time
if users kill the application in early state, but those
operations will not be quite often in real case.
Change-Id: Id89a96b5d39f8a528e72dea8c0eb6524bdaf7ee4
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/389433
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
2017-12-08 10:57:43 +00:00
|
|
|
/* Event-framework fini hook: funnels into the common shutdown path. */
static void
nvmf_subsystem_fini(void)
{
	nvmf_shutdown_cb(NULL);
}
|
|
|
|
|
2018-03-13 00:16:47 +00:00
|
|
|
static int
|
2016-08-16 16:35:59 +00:00
|
|
|
acceptor_poll(void *arg)
|
|
|
|
{
|
2017-08-21 21:07:44 +00:00
|
|
|
struct spdk_nvmf_tgt *tgt = arg;
|
2020-06-10 10:11:16 +00:00
|
|
|
uint32_t count;
|
2017-08-21 21:07:44 +00:00
|
|
|
|
2020-06-08 21:38:29 +00:00
|
|
|
count = spdk_nvmf_tgt_accept(tgt);
|
2018-03-13 00:16:47 +00:00
|
|
|
|
2020-05-04 09:51:27 +00:00
|
|
|
if (count > 0) {
|
|
|
|
return SPDK_POLLER_BUSY;
|
|
|
|
} else {
|
|
|
|
return SPDK_POLLER_IDLE;
|
|
|
|
}
|
2016-08-16 16:35:59 +00:00
|
|
|
}
|
|
|
|
|
2017-11-03 20:44:10 +00:00
|
|
|
static void
|
2020-02-03 01:10:34 +00:00
|
|
|
_nvmf_tgt_destroy_poll_group_done(void *ctx)
|
2017-11-03 20:44:10 +00:00
|
|
|
{
|
2020-02-03 01:10:34 +00:00
|
|
|
assert(g_num_poll_groups > 0);
|
|
|
|
|
|
|
|
if (--g_num_poll_groups == 0) {
|
|
|
|
g_tgt_state = NVMF_TGT_FINI_STOP_ACCEPTOR;
|
|
|
|
nvmf_tgt_advance_state();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
|
|
|
|
{
|
|
|
|
struct nvmf_tgt_poll_group *pg = cb_arg;
|
|
|
|
|
|
|
|
free(pg);
|
|
|
|
|
|
|
|
spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);
|
2020-02-03 01:53:46 +00:00
|
|
|
|
|
|
|
spdk_thread_exit(spdk_get_thread());
|
2017-11-03 20:44:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Runs on each poll group's thread: begin asynchronous destruction of
 * that group; completion continues in nvmf_tgt_destroy_poll_group_done(). */
static void
nvmf_tgt_destroy_poll_group(void *ctx)
{
	struct nvmf_tgt_poll_group *pg = ctx;

	spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_tgt_destroy_poll_groups(void)
|
2017-11-03 20:44:10 +00:00
|
|
|
{
|
2019-04-22 21:09:51 +00:00
|
|
|
struct nvmf_tgt_poll_group *pg, *tpg;
|
|
|
|
|
2020-02-03 01:10:34 +00:00
|
|
|
g_tgt_fini_thread = spdk_get_thread();
|
|
|
|
assert(g_tgt_fini_thread != NULL);
|
2019-04-22 21:09:51 +00:00
|
|
|
|
|
|
|
TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
|
2020-02-03 01:10:34 +00:00
|
|
|
TAILQ_REMOVE(&g_poll_groups, pg, link);
|
|
|
|
spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
|
2018-08-28 22:41:16 +00:00
|
|
|
}
|
2017-11-03 20:44:10 +00:00
|
|
|
}
|
|
|
|
|
2017-11-02 23:03:10 +00:00
|
|
|
static void
|
2017-11-17 18:43:45 +00:00
|
|
|
nvmf_tgt_create_poll_group_done(void *ctx)
|
2017-11-02 23:03:10 +00:00
|
|
|
{
|
2020-02-03 01:53:46 +00:00
|
|
|
struct nvmf_tgt_poll_group *pg = ctx;
|
|
|
|
|
|
|
|
TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);
|
|
|
|
|
|
|
|
assert(g_num_poll_groups < spdk_env_get_core_count());
|
|
|
|
|
|
|
|
if (++g_num_poll_groups == spdk_env_get_core_count()) {
|
|
|
|
g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
|
|
|
|
nvmf_tgt_advance_state();
|
|
|
|
}
|
2017-11-02 23:03:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-11-17 18:43:45 +00:00
|
|
|
nvmf_tgt_create_poll_group(void *ctx)
|
2017-11-02 23:03:10 +00:00
|
|
|
{
|
|
|
|
struct nvmf_tgt_poll_group *pg;
|
|
|
|
|
2019-04-22 21:09:51 +00:00
|
|
|
pg = calloc(1, sizeof(*pg));
|
|
|
|
if (!pg) {
|
|
|
|
SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
|
|
|
|
spdk_app_stop(-ENOMEM);
|
|
|
|
return;
|
|
|
|
}
|
2017-11-02 23:03:10 +00:00
|
|
|
|
2019-04-22 21:09:51 +00:00
|
|
|
pg->thread = spdk_get_thread();
|
2018-03-10 00:33:41 +00:00
|
|
|
pg->group = spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt);
|
2019-04-22 21:09:51 +00:00
|
|
|
|
2020-02-03 01:53:46 +00:00
|
|
|
spdk_thread_send_msg(g_tgt_init_thread, nvmf_tgt_create_poll_group_done, pg);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_tgt_create_poll_groups(void)
|
|
|
|
{
|
|
|
|
struct spdk_cpuset tmp_cpumask = {};
|
|
|
|
uint32_t i;
|
|
|
|
char thread_name[32];
|
|
|
|
struct spdk_thread *thread;
|
|
|
|
|
|
|
|
g_tgt_init_thread = spdk_get_thread();
|
|
|
|
assert(g_tgt_init_thread != NULL);
|
|
|
|
|
|
|
|
SPDK_ENV_FOREACH_CORE(i) {
|
|
|
|
spdk_cpuset_zero(&tmp_cpumask);
|
|
|
|
spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
|
|
|
|
snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%u", i);
|
|
|
|
|
|
|
|
thread = spdk_thread_create(thread_name, &tmp_cpumask);
|
|
|
|
assert(thread != NULL);
|
|
|
|
|
|
|
|
spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
|
2019-04-22 21:09:51 +00:00
|
|
|
}
|
2017-11-02 23:03:10 +00:00
|
|
|
}
|
|
|
|
|
2017-12-19 23:39:04 +00:00
|
|
|
static void
|
|
|
|
nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
|
|
|
|
void *cb_arg, int status)
|
|
|
|
{
|
|
|
|
subsystem = spdk_nvmf_subsystem_get_next(subsystem);
|
2020-08-13 00:09:22 +00:00
|
|
|
int rc;
|
2017-12-19 23:39:04 +00:00
|
|
|
|
|
|
|
if (subsystem) {
|
2020-08-13 00:09:22 +00:00
|
|
|
rc = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
|
|
|
|
if (rc) {
|
|
|
|
g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
|
|
|
|
SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
|
|
|
|
nvmf_tgt_advance_state();
|
|
|
|
}
|
2017-12-19 23:39:04 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-03-10 00:28:52 +00:00
|
|
|
g_tgt_state = NVMF_TGT_INIT_START_ACCEPTOR;
|
2018-03-08 20:26:44 +00:00
|
|
|
nvmf_tgt_advance_state();
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
|
|
|
|
void *cb_arg, int status)
|
|
|
|
{
|
|
|
|
subsystem = spdk_nvmf_subsystem_get_next(subsystem);
|
2020-08-13 00:09:22 +00:00
|
|
|
int rc;
|
2017-12-19 23:39:04 +00:00
|
|
|
|
|
|
|
if (subsystem) {
|
2020-08-13 00:09:22 +00:00
|
|
|
rc = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
|
|
|
|
if (rc) {
|
|
|
|
SPDK_ERRLOG("Unable to stop NVMe-oF subsystem. Trying others.\n");
|
|
|
|
nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
|
|
|
|
}
|
2017-12-19 23:39:04 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-03-10 00:28:52 +00:00
|
|
|
g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
|
2018-03-08 20:26:44 +00:00
|
|
|
nvmf_tgt_advance_state();
|
2017-12-19 23:39:04 +00:00
|
|
|
}
|
|
|
|
|
2018-06-05 22:34:04 +00:00
|
|
|
/* Completion callback for spdk_nvmf_tgt_destroy(): release the config and
 * advance to the final STOPPED state. */
static void
nvmf_tgt_destroy_done(void *ctx, int status)
{
	g_tgt_state = NVMF_TGT_STOPPED;

	free(g_spdk_nvmf_tgt_conf);
	g_spdk_nvmf_tgt_conf = NULL;
	nvmf_tgt_advance_state();
}
|
|
|
|
|
2018-08-27 22:27:47 +00:00
|
|
|
static void
|
|
|
|
nvmf_tgt_parse_conf_done(int status)
|
|
|
|
{
|
|
|
|
g_tgt_state = (status == 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS : NVMF_TGT_ERROR;
|
|
|
|
nvmf_tgt_advance_state();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvmf_tgt_parse_conf_start(void *ctx)
|
|
|
|
{
|
2020-05-10 19:34:39 +00:00
|
|
|
if (nvmf_parse_conf(nvmf_tgt_parse_conf_done)) {
|
|
|
|
SPDK_ERRLOG("nvmf_parse_conf() failed\n");
|
2018-08-27 22:27:47 +00:00
|
|
|
g_tgt_state = NVMF_TGT_ERROR;
|
|
|
|
nvmf_tgt_advance_state();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-31 17:40:03 +00:00
|
|
|
static void
|
|
|
|
fixup_identify_ctrlr(struct spdk_nvmf_request *req)
|
|
|
|
{
|
|
|
|
uint32_t length;
|
|
|
|
int rc;
|
|
|
|
struct spdk_nvme_ctrlr_data *nvme_cdata;
|
|
|
|
struct spdk_nvme_ctrlr_data nvmf_cdata = {};
|
|
|
|
struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
|
|
|
|
struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
|
|
|
|
|
|
|
|
/* This is the identify data from the NVMe drive */
|
|
|
|
spdk_nvmf_request_get_data(req, (void **)&nvme_cdata, &length);
|
|
|
|
|
|
|
|
/* Get the NVMF identify data */
|
|
|
|
rc = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, &nvmf_cdata);
|
|
|
|
if (rc != SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
|
|
|
|
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
|
|
|
|
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fixup NVMF identify data with NVMe identify data */
|
|
|
|
|
|
|
|
/* Serial Number (SN) */
|
|
|
|
memcpy(&nvmf_cdata.sn[0], &nvme_cdata->sn[0], sizeof(nvmf_cdata.sn));
|
|
|
|
/* Model Number (MN) */
|
|
|
|
memcpy(&nvmf_cdata.mn[0], &nvme_cdata->mn[0], sizeof(nvmf_cdata.mn));
|
|
|
|
/* Firmware Revision (FR) */
|
|
|
|
memcpy(&nvmf_cdata.fr[0], &nvme_cdata->fr[0], sizeof(nvmf_cdata.fr));
|
|
|
|
/* IEEE OUI Identifier (IEEE) */
|
|
|
|
memcpy(&nvmf_cdata.ieee[0], &nvme_cdata->ieee[0], sizeof(nvmf_cdata.ieee));
|
|
|
|
/* FRU Globally Unique Identifier (FGUID) */
|
|
|
|
|
|
|
|
/* Copy the fixed up data back to the response */
|
|
|
|
memcpy(nvme_cdata, &nvmf_cdata, length);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Custom admin-command handler for IDENTIFY: when the subsystem exposes a
 * single namespace whose bdev supports NVME_ADMIN passthru, forward the
 * IDENTIFY CONTROLLER command to the backing device and fix up the reply
 * via fixup_identify_ctrlr(). Returns a negative value to tell the caller
 * to fall back to the default handling.
 */
static int
nvmf_custom_identify_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsys;
	int rc;

	if (cmd->cdw10_bits.identify.cns != SPDK_NVME_IDENTIFY_CTRLR) {
		return -1; /* continue */
	}

	subsys = spdk_nvmf_request_get_subsystem(req);
	if (subsys == NULL) {
		return -1;
	}

	/* Only process this request if it has exactly one namespace */
	if (spdk_nvmf_subsystem_get_max_nsid(subsys) != 1) {
		return -1;
	}

	/* Forward to first namespace if it supports NVME admin commands */
	rc = spdk_nvmf_request_get_bdev(1, req, &bdev, &desc, &ch);
	if (rc) {
		/* No bdev found for this namespace. Continue. */
		return -1;
	}

	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return -1;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, fixup_identify_ctrlr);
}
|
|
|
|
|
2016-06-06 21:44:30 +00:00
|
|
|
/*
 * Core state machine driver. Loops, re-dispatching on g_tgt_state, until a
 * state is reached that does not transition synchronously (asynchronous
 * states advance later via their completion callbacks calling back in here).
 * Must only run on the init/fini thread.
 */
static void
nvmf_tgt_advance_state(void)
{
	enum nvmf_tgt_state prev_state;
	int rc = -1;	/* generic error code reported on NVMF_TGT_ERROR */
	int ret;

	do {
		prev_state = g_tgt_state;

		switch (g_tgt_state) {
		case NVMF_TGT_INIT_NONE: {
			g_tgt_state = NVMF_TGT_INIT_PARSE_CONFIG;
			break;
		}
		case NVMF_TGT_INIT_PARSE_CONFIG:
			/* Send message to self to call parse conf func.
			 * Prevents it from possibly performing cb before getting
			 * out of this function, which causes problems. */
			spdk_thread_send_msg(spdk_get_thread(), nvmf_tgt_parse_conf_start, NULL);
			break;
		case NVMF_TGT_INIT_CREATE_POLL_GROUPS:
			/* Config parsed */
			if (g_spdk_nvmf_tgt_conf->admin_passthru.identify_ctrlr) {
				SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
				spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, nvmf_custom_identify_hdlr);
			}
			/* Create poll group threads, and send a message to each thread
			 * and create a poll group.
			 */
			nvmf_tgt_create_poll_groups();
			break;
		case NVMF_TGT_INIT_START_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				/* Start the first subsystem; the callback chains through the rest. */
				ret = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
				if (ret) {
					SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
					g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
				}
			} else {
				/* No subsystems configured — skip straight to the acceptor. */
				g_tgt_state = NVMF_TGT_INIT_START_ACCEPTOR;
			}
			break;
		}
		case NVMF_TGT_INIT_START_ACCEPTOR:
			g_acceptor_poller = SPDK_POLLER_REGISTER(acceptor_poll, g_spdk_nvmf_tgt,
					    g_spdk_nvmf_tgt_conf->acceptor_poll_rate);
			g_tgt_state = NVMF_TGT_RUNNING;
			break;
		case NVMF_TGT_RUNNING:
			/* Initialization complete — let the event framework continue. */
			spdk_subsystem_init_next(0);
			break;
		case NVMF_TGT_FINI_STOP_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				/* Stop the first subsystem; the callback chains through the rest.
				 * If the stop can't even be issued, skip it and keep going. */
				ret = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
				if (ret) {
					nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
				}
			} else {
				g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
			}
			break;
		}
		case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
			/* Send a message to each poll group thread, and terminate the thread */
			nvmf_tgt_destroy_poll_groups();
			break;
		case NVMF_TGT_FINI_STOP_ACCEPTOR:
			spdk_poller_unregister(&g_acceptor_poller);
			g_tgt_state = NVMF_TGT_FINI_FREE_RESOURCES;
			break;
		case NVMF_TGT_FINI_FREE_RESOURCES:
			/* Asynchronous; nvmf_tgt_destroy_done() advances to STOPPED. */
			spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt, nvmf_tgt_destroy_done, NULL);
			break;
		case NVMF_TGT_STOPPED:
			spdk_subsystem_fini_next();
			return;
		case NVMF_TGT_ERROR:
			/* Report init failure (rc == -1) to the event framework. */
			spdk_subsystem_init_next(rc);
			return;
		}

	} while (g_tgt_state != prev_state);
}
|
|
|
|
|
2018-03-08 20:26:44 +00:00
|
|
|
/* Event-framework init hook: reset the state machine and start driving it. */
static void
nvmf_subsystem_init(void)
{
	g_tgt_state = NVMF_TGT_INIT_NONE;
	nvmf_tgt_advance_state();
}
|
2018-03-08 20:26:44 +00:00
|
|
|
|
2018-05-07 18:26:13 +00:00
|
|
|
/*
 * Emit this subsystem's configuration as a JSON array of RPC method calls:
 * one "nvmf_set_config" entry for the module-level settings, followed by
 * whatever the target itself serializes. Write order matters — each
 * begin/end pair below must stay balanced and sequential.
 */
static void
nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
{
	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_config");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "acceptor_poll_rate", g_spdk_nvmf_tgt_conf->acceptor_poll_rate);
	spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
	spdk_json_write_named_bool(w, "identify_ctrlr",
				   g_spdk_nvmf_tgt_conf->admin_passthru.identify_ctrlr);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	spdk_nvmf_tgt_write_config_json(w, g_spdk_nvmf_tgt);
	spdk_json_write_array_end(w);
}
|
|
|
|
|
2018-03-09 17:36:14 +00:00
|
|
|
/* Registration of the "nvmf" event subsystem with the SPDK application
 * framework; init/fini/config hooks are defined above. */
static struct spdk_subsystem g_spdk_subsystem_nvmf = {
	.name = "nvmf",
	.init = nvmf_subsystem_init,
	.fini = nvmf_subsystem_fini,
	.write_config_json = nvmf_subsystem_write_config_json,
};

SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf)
/* The nvmf subsystem requires bdev and sock to be initialized first. */
SPDK_SUBSYSTEM_DEPEND(nvmf, bdev)
SPDK_SUBSYSTEM_DEPEND(nvmf, sock)
|