nvmf: Rename spdk_nvmf_session to spdk_nvmf_ctrlr

This is just a rename; the functionality hasn't changed.
Use the same terminology as the NVMe-oF specification ("controller")
so that those familiar with the specification can more easily
approach the code base.

This is still conceptually equivalent to a "session" in the
networking sense.

Change-Id: I388b56df62d19560224c4adc2a03c71eae6fed0d
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/371746
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Ben Walker 2017-07-13 14:18:08 -07:00 committed by Daniel Verkamp
parent 5e79d6b8c6
commit 03788f93df
23 changed files with 432 additions and 434 deletions
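For orientation, here is a condensed, illustrative sketch of what the rename amounts to, built from a few representative declarations taken from the hunks below (the full change also covers the transport callbacks, the Makefile source list, and the session.c/session.h file names):

/* Forward declaration shared by both spellings. */
struct spdk_nvmf_conn;

/* Old, session-based naming: */
struct spdk_nvmf_session;
void spdk_nvmf_session_disconnect(struct spdk_nvmf_conn *conn);
int spdk_nvmf_session_poll(struct spdk_nvmf_session *session);

/* New, controller-based naming, matching the NVMe-oF specification: */
struct spdk_nvmf_ctrlr;
void spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn);
int spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr);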

View File

@ -146,7 +146,7 @@ disconnect_event(void *arg1, void *arg2)
{
struct spdk_nvmf_conn *conn = arg1;
spdk_nvmf_session_disconnect(conn);
spdk_nvmf_ctrlr_disconnect(conn);
}
static void

View File

@ -56,7 +56,7 @@ int spdk_nvmf_tgt_fini(void);
int spdk_nvmf_check_pools(void);
struct spdk_nvmf_subsystem;
struct spdk_nvmf_session;
struct spdk_nvmf_ctrlr;
struct spdk_nvmf_conn;
struct spdk_nvmf_request;
struct spdk_bdev;
@ -84,7 +84,7 @@ struct spdk_nvmf_subsystem_allowed_listener {
/*
* The NVMf subsystem, as indicated in the specification, is a collection
* of virtual controller sessions. Any individual controller session has
* of controllers. Any individual controller has
* access to all the NVMe device/namespaces maintained by the subsystem.
*/
struct spdk_nvmf_subsystem {
@ -107,7 +107,7 @@ struct spdk_nvmf_subsystem {
spdk_nvmf_subsystem_connect_fn connect_cb;
spdk_nvmf_subsystem_disconnect_fn disconnect_cb;
TAILQ_HEAD(, spdk_nvmf_session) sessions;
TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;
TAILQ_HEAD(, spdk_nvmf_host) hosts;
@ -172,6 +172,6 @@ void spdk_nvmf_acceptor_poll(void);
void spdk_nvmf_handle_connect(struct spdk_nvmf_request *req);
void spdk_nvmf_session_disconnect(struct spdk_nvmf_conn *conn);
void spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn);
#endif

View File

@ -37,9 +37,9 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
CFLAGS += $(ENV_CFLAGS)
LIBNAME = nvmf
C_SRCS = subsystem.c nvmf.c \
request.c session.c transport.c \
ctrlr_discovery.c ctrlr_bdev.c
C_SRCS = ctrlr.c ctrlr_discovery.c ctrlr_bdev.c \
subsystem.c nvmf.c \
request.c transport.c
C_SRCS-$(CONFIG_RDMA) += rdma.c

View File

@ -33,7 +33,7 @@
#include "spdk/stdinc.h"
#include "session.h"
#include "ctrlr.h"
#include "nvmf_internal.h"
#include "request.h"
#include "subsystem.h"
@ -48,136 +48,136 @@
#define MIN_KEEP_ALIVE_TIMEOUT 10000
static void
nvmf_init_discovery_session_properties(struct spdk_nvmf_session *session)
nvmf_init_discovery_ctrlr_properties(struct spdk_nvmf_ctrlr *ctrlr)
{
session->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
ctrlr->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
/* extended data for get log page supportted */
session->vcdata.lpa.edlp = 1;
session->vcdata.cntlid = session->cntlid;
session->vcdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
session->vcdata.nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
session->vcdata.nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
session->vcdata.nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
session->vcdata.nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
session->vcdata.sgls.keyed_sgl = 1;
session->vcdata.sgls.sgl_offset = 1;
ctrlr->vcdata.lpa.edlp = 1;
ctrlr->vcdata.cntlid = ctrlr->cntlid;
ctrlr->vcdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
ctrlr->vcdata.nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
ctrlr->vcdata.nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
ctrlr->vcdata.nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
ctrlr->vcdata.nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
ctrlr->vcdata.sgls.keyed_sgl = 1;
ctrlr->vcdata.sgls.sgl_offset = 1;
strncpy((char *)session->vcdata.subnqn, SPDK_NVMF_DISCOVERY_NQN, sizeof(session->vcdata.subnqn));
strncpy((char *)ctrlr->vcdata.subnqn, SPDK_NVMF_DISCOVERY_NQN, sizeof(ctrlr->vcdata.subnqn));
/* Properties */
session->vcprop.cap.raw = 0;
session->vcprop.cap.bits.cqr = 1; /* NVMF specification required */
session->vcprop.cap.bits.mqes = (session->vcdata.maxcmd - 1); /* max queue depth */
session->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
session->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMf */
session->vcprop.cap.bits.css_nvm = 1; /* NVM command set */
session->vcprop.cap.bits.mpsmin = 0; /* 2 ^ 12 + mpsmin == 4k */
session->vcprop.cap.bits.mpsmax = 0; /* 2 ^ 12 + mpsmax == 4k */
ctrlr->vcprop.cap.raw = 0;
ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
ctrlr->vcprop.cap.bits.mqes = ctrlr->vcdata.maxcmd - 1; /* max queue depth */
ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
ctrlr->vcprop.cap.bits.css_nvm = 1; /* NVM command set */
ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */
/* Version Supported: 1.2.1 */
session->vcprop.vs.bits.mjr = 1;
session->vcprop.vs.bits.mnr = 2;
session->vcprop.vs.bits.ter = 1;
session->vcdata.ver = session->vcprop.vs;
ctrlr->vcprop.vs.bits.mjr = 1;
ctrlr->vcprop.vs.bits.mnr = 2;
ctrlr->vcprop.vs.bits.ter = 1;
ctrlr->vcdata.ver = ctrlr->vcprop.vs;
session->vcprop.cc.raw = 0;
ctrlr->vcprop.cc.raw = 0;
session->vcprop.csts.raw = 0;
session->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
ctrlr->vcprop.csts.raw = 0;
ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
}
static void
nvmf_init_nvme_session_properties(struct spdk_nvmf_session *session)
nvmf_init_nvme_ctrlr_properties(struct spdk_nvmf_ctrlr *ctrlr)
{
assert((g_nvmf_tgt.max_io_size % 4096) == 0);
/* Init the controller details */
session->subsys->ops->ctrlr_get_data(session);
ctrlr->subsys->ops->ctrlr_get_data(ctrlr);
session->vcdata.aerl = 0;
session->vcdata.cntlid = session->cntlid;
session->vcdata.kas = 10;
session->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
session->vcdata.mdts = spdk_u32log2(g_nvmf_tgt.max_io_size / 4096);
session->vcdata.sgls.keyed_sgl = 1;
session->vcdata.sgls.sgl_offset = 1;
ctrlr->vcdata.aerl = 0;
ctrlr->vcdata.cntlid = ctrlr->cntlid;
ctrlr->vcdata.kas = 10;
ctrlr->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
ctrlr->vcdata.mdts = spdk_u32log2(g_nvmf_tgt.max_io_size / 4096);
ctrlr->vcdata.sgls.keyed_sgl = 1;
ctrlr->vcdata.sgls.sgl_offset = 1;
session->vcdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
session->vcdata.nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
session->vcdata.nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
session->vcdata.nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
session->vcdata.nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
ctrlr->vcdata.nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
ctrlr->vcdata.nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
ctrlr->vcdata.nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
ctrlr->vcdata.nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
ctrlr->vcdata.nvmf_specific.msdbd = 1; /* target supports single SGL in capsule */
/* TODO: this should be set by the transport */
session->vcdata.nvmf_specific.ioccsz += g_nvmf_tgt.in_capsule_data_size / 16;
ctrlr->vcdata.nvmf_specific.ioccsz += g_nvmf_tgt.in_capsule_data_size / 16;
strncpy((char *)session->vcdata.subnqn, session->subsys->subnqn, sizeof(session->vcdata.subnqn));
strncpy((char *)ctrlr->vcdata.subnqn, ctrlr->subsys->subnqn, sizeof(ctrlr->vcdata.subnqn));
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ctrlr data: maxcmd %x\n",
session->vcdata.maxcmd);
ctrlr->vcdata.maxcmd);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ext ctrlr data: ioccsz %x\n",
session->vcdata.nvmf_specific.ioccsz);
ctrlr->vcdata.nvmf_specific.ioccsz);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ext ctrlr data: iorcsz %x\n",
session->vcdata.nvmf_specific.iorcsz);
ctrlr->vcdata.nvmf_specific.iorcsz);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ext ctrlr data: icdoff %x\n",
session->vcdata.nvmf_specific.icdoff);
ctrlr->vcdata.nvmf_specific.icdoff);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ext ctrlr data: ctrattr %x\n",
*(uint8_t *)&session->vcdata.nvmf_specific.ctrattr);
*(uint8_t *)&ctrlr->vcdata.nvmf_specific.ctrattr);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " ext ctrlr data: msdbd %x\n",
session->vcdata.nvmf_specific.msdbd);
ctrlr->vcdata.nvmf_specific.msdbd);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " sgls data: 0x%x\n",
*(uint32_t *)&session->vcdata.sgls);
*(uint32_t *)&ctrlr->vcdata.sgls);
session->vcprop.cap.raw = 0;
session->vcprop.cap.bits.cqr = 1;
session->vcprop.cap.bits.mqes = (session->vcdata.maxcmd - 1); /* max queue depth */
session->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
session->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
session->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMf */
session->vcprop.cap.bits.css_nvm = 1; /* NVM command set */
session->vcprop.cap.bits.mpsmin = 0; /* 2 ^ 12 + mpsmin == 4k */
session->vcprop.cap.bits.mpsmax = 0; /* 2 ^ 12 + mpsmax == 4k */
ctrlr->vcprop.cap.raw = 0;
ctrlr->vcprop.cap.bits.cqr = 1;
ctrlr->vcprop.cap.bits.mqes = ctrlr->vcdata.maxcmd - 1; /* max queue depth */
ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
ctrlr->vcprop.cap.bits.css_nvm = 1; /* NVM command set */
ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */
/* Report at least version 1.2.1 */
if (session->vcprop.vs.raw < SPDK_NVME_VERSION(1, 2, 1)) {
session->vcprop.vs.bits.mjr = 1;
session->vcprop.vs.bits.mnr = 2;
session->vcprop.vs.bits.ter = 1;
session->vcdata.ver = session->vcprop.vs;
if (ctrlr->vcprop.vs.raw < SPDK_NVME_VERSION(1, 2, 1)) {
ctrlr->vcprop.vs.bits.mjr = 1;
ctrlr->vcprop.vs.bits.mnr = 2;
ctrlr->vcprop.vs.bits.ter = 1;
ctrlr->vcdata.ver = ctrlr->vcprop.vs;
}
session->vcprop.cc.raw = 0;
session->vcprop.cc.bits.en = 0; /* Init controller disabled */
ctrlr->vcprop.cc.raw = 0;
ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */
session->vcprop.csts.raw = 0;
session->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
ctrlr->vcprop.csts.raw = 0;
ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
SPDK_TRACELOG(SPDK_TRACE_NVMF, " cap %" PRIx64 "\n",
session->vcprop.cap.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " vs %x\n", session->vcprop.vs.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " cc %x\n", session->vcprop.cc.raw);
ctrlr->vcprop.cap.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " vs %x\n", ctrlr->vcprop.vs.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " cc %x\n", ctrlr->vcprop.cc.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " csts %x\n",
session->vcprop.csts.raw);
ctrlr->vcprop.csts.raw);
}
static void session_destruct(struct spdk_nvmf_session *session)
static void ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
TAILQ_REMOVE(&session->subsys->sessions, session, link);
session->transport->session_fini(session);
TAILQ_REMOVE(&ctrlr->subsys->ctrlrs, ctrlr, link);
ctrlr->transport->ctrlr_fini(ctrlr);
}
void
spdk_nvmf_session_destruct(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
while (!TAILQ_EMPTY(&session->connections)) {
struct spdk_nvmf_conn *conn = TAILQ_FIRST(&session->connections);
while (!TAILQ_EMPTY(&ctrlr->connections)) {
struct spdk_nvmf_conn *conn = TAILQ_FIRST(&ctrlr->connections);
TAILQ_REMOVE(&session->connections, conn, link);
session->num_connections--;
TAILQ_REMOVE(&ctrlr->connections, conn, link);
ctrlr->num_connections--;
conn->transport->conn_fini(conn);
}
session_destruct(session);
ctrlr_destruct(ctrlr);
}
static void
@ -190,7 +190,7 @@ invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp, uint8_t iattr
}
static uint16_t
spdk_nvmf_session_gen_cntlid(void)
spdk_nvmf_ctrlr_gen_cntlid(void)
{
static uint16_t cntlid = 0; /* cntlid is static, so its value is preserved */
struct spdk_nvmf_subsystem *subsystem;
@ -211,8 +211,8 @@ spdk_nvmf_session_gen_cntlid(void)
}
/* Check if a subsystem with this cntlid currently exists. This could
* happen for a very long-lived session on a target with many short-lived
* sessions, where cntlid wraps around.
* happen for a very long-lived ctrlr on a target with many short-lived
* ctrlrs, where cntlid wraps around.
*/
subsystem = spdk_nvmf_find_subsystem_with_cntlid(cntlid);
@ -228,12 +228,12 @@ spdk_nvmf_session_gen_cntlid(void)
}
void
spdk_nvmf_session_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp)
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp)
{
struct spdk_nvmf_session *session;
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_subsystem *subsystem;
#define INVALID_CONNECT_CMD(field) invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
@ -287,131 +287,131 @@ spdk_nvmf_session_connect(struct spdk_nvmf_conn *conn,
return;
}
/* Establish a new session */
session = conn->transport->session_init();
if (session == NULL) {
/* Establish a new ctrlr */
ctrlr = conn->transport->ctrlr_init();
if (ctrlr == NULL) {
SPDK_ERRLOG("Memory allocation failure\n");
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return;
}
TAILQ_INIT(&session->connections);
TAILQ_INIT(&ctrlr->connections);
session->cntlid = spdk_nvmf_session_gen_cntlid();
if (session->cntlid == 0) {
ctrlr->cntlid = spdk_nvmf_ctrlr_gen_cntlid();
if (ctrlr->cntlid == 0) {
/* Unable to get a cntlid */
SPDK_ERRLOG("Reached max simultaneous sessions\n");
SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
return;
}
session->kato = cmd->kato;
session->async_event_config.raw = 0;
session->num_connections = 0;
session->subsys = subsystem;
session->max_connections_allowed = g_nvmf_tgt.max_queues_per_session;
ctrlr->kato = cmd->kato;
ctrlr->async_event_config.raw = 0;
ctrlr->num_connections = 0;
ctrlr->subsys = subsystem;
ctrlr->max_connections_allowed = g_nvmf_tgt.max_queues_per_ctrlr;
memcpy(session->hostid, data->hostid, sizeof(session->hostid));
memcpy(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid));
if (conn->transport->session_add_conn(session, conn)) {
if (conn->transport->ctrlr_add_conn(ctrlr, conn)) {
rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
conn->transport->session_fini(session);
free(session);
conn->transport->ctrlr_fini(ctrlr);
free(ctrlr);
return;
}
if (subsystem->subtype == SPDK_NVMF_SUBTYPE_NVME) {
nvmf_init_nvme_session_properties(session);
nvmf_init_nvme_ctrlr_properties(ctrlr);
} else {
nvmf_init_discovery_session_properties(session);
nvmf_init_discovery_ctrlr_properties(ctrlr);
}
TAILQ_INSERT_TAIL(&subsystem->sessions, session, link);
TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);
} else {
struct spdk_nvmf_session *tmp;
struct spdk_nvmf_ctrlr *tmp;
conn->type = CONN_TYPE_IOQ;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);
session = NULL;
TAILQ_FOREACH(tmp, &subsystem->sessions, link) {
ctrlr = NULL;
TAILQ_FOREACH(tmp, &subsystem->ctrlrs, link) {
if (tmp->cntlid == data->cntlid) {
session = tmp;
ctrlr = tmp;
break;
}
}
if (session == NULL) {
if (ctrlr == NULL) {
SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
INVALID_CONNECT_DATA(cntlid);
return;
}
if (!session->vcprop.cc.bits.en) {
if (!ctrlr->vcprop.cc.bits.en) {
SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
INVALID_CONNECT_CMD(qid);
return;
}
if (1u << session->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
session->vcprop.cc.bits.iosqes);
ctrlr->vcprop.cc.bits.iosqes);
INVALID_CONNECT_CMD(qid);
return;
}
if (1u << session->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
session->vcprop.cc.bits.iocqes);
ctrlr->vcprop.cc.bits.iocqes);
INVALID_CONNECT_CMD(qid);
return;
}
/* check if we would exceed session connection limit */
if (session->num_connections >= session->max_connections_allowed) {
SPDK_ERRLOG("connection limit %d\n", session->num_connections);
/* check if we would exceed ctrlr connection limit */
if (ctrlr->num_connections >= ctrlr->max_connections_allowed) {
SPDK_ERRLOG("connection limit %d\n", ctrlr->num_connections);
rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
return;
}
if (conn->transport->session_add_conn(session, conn)) {
if (conn->transport->ctrlr_add_conn(ctrlr, conn)) {
INVALID_CONNECT_CMD(qid);
return;
}
}
session->num_connections++;
TAILQ_INSERT_HEAD(&session->connections, conn, link);
conn->sess = session;
ctrlr->num_connections++;
TAILQ_INSERT_HEAD(&ctrlr->connections, conn, link);
conn->ctrlr = ctrlr;
rsp->status.sc = SPDK_NVME_SC_SUCCESS;
rsp->status_code_specific.success.cntlid = session->vcdata.cntlid;
rsp->status_code_specific.success.cntlid = ctrlr->vcdata.cntlid;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
rsp->status_code_specific.success.cntlid);
}
void
spdk_nvmf_session_disconnect(struct spdk_nvmf_conn *conn)
spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn)
{
struct spdk_nvmf_session *session = conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = conn->ctrlr;
assert(session != NULL);
session->num_connections--;
TAILQ_REMOVE(&session->connections, conn, link);
assert(ctrlr != NULL);
ctrlr->num_connections--;
TAILQ_REMOVE(&ctrlr->connections, conn, link);
conn->transport->session_remove_conn(session, conn);
conn->transport->ctrlr_remove_conn(ctrlr, conn);
conn->transport->conn_fini(conn);
if (session->num_connections == 0) {
session_destruct(session);
if (ctrlr->num_connections == 0) {
ctrlr_destruct(ctrlr);
}
}
struct spdk_nvmf_conn *
spdk_nvmf_session_get_conn(struct spdk_nvmf_session *session, uint16_t qid)
spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
{
struct spdk_nvmf_conn *conn;
TAILQ_FOREACH(conn, &session->connections, link) {
TAILQ_FOREACH(conn, &ctrlr->connections, link) {
if (conn->qid == qid) {
return conn;
}
@ -427,44 +427,44 @@ spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid)
}
static uint64_t
nvmf_prop_get_cap(struct spdk_nvmf_session *session)
nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
{
return session->vcprop.cap.raw;
return ctrlr->vcprop.cap.raw;
}
static uint64_t
nvmf_prop_get_vs(struct spdk_nvmf_session *session)
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
{
return session->vcprop.vs.raw;
return ctrlr->vcprop.vs.raw;
}
static uint64_t
nvmf_prop_get_cc(struct spdk_nvmf_session *session)
nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
{
return session->vcprop.cc.raw;
return ctrlr->vcprop.cc.raw;
}
static bool
nvmf_prop_set_cc(struct spdk_nvmf_session *session, uint64_t value)
nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint64_t value)
{
union spdk_nvme_cc_register cc, diff;
cc.raw = (uint32_t)value;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "cur CC: 0x%08x\n", session->vcprop.cc.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "new CC: 0x%08x\n", cc.raw);
/*
* Calculate which bits changed between the current and new CC.
* Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
*/
diff.raw = cc.raw ^ session->vcprop.cc.raw;
diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;
if (diff.bits.en) {
if (cc.bits.en) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Property Set CC Enable!\n");
session->vcprop.cc.bits.en = 1;
session->vcprop.csts.bits.rdy = 1;
ctrlr->vcprop.cc.bits.en = 1;
ctrlr->vcprop.csts.bits.rdy = 1;
} else {
SPDK_ERRLOG("CC.EN transition from 1 to 0 (reset) not implemented!\n");
@ -477,12 +477,12 @@ nvmf_prop_set_cc(struct spdk_nvmf_session *session, uint64_t value)
cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Property Set CC Shutdown %u%ub!\n",
cc.bits.shn >> 1, cc.bits.shn & 1);
session->vcprop.cc.bits.shn = cc.bits.shn;
session->vcprop.cc.bits.en = 0;
session->vcprop.csts.bits.rdy = 0;
session->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
ctrlr->vcprop.cc.bits.en = 0;
ctrlr->vcprop.csts.bits.rdy = 0;
ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
} else if (cc.bits.shn == 0) {
session->vcprop.cc.bits.shn = 0;
ctrlr->vcprop.cc.bits.shn = 0;
} else {
SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
cc.bits.shn >> 1, cc.bits.shn & 1);
@ -494,14 +494,14 @@ nvmf_prop_set_cc(struct spdk_nvmf_session *session, uint64_t value)
if (diff.bits.iosqes) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Prop Set IOSQES = %u (%u bytes)\n",
cc.bits.iosqes, 1u << cc.bits.iosqes);
session->vcprop.cc.bits.iosqes = cc.bits.iosqes;
ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
diff.bits.iosqes = 0;
}
if (diff.bits.iocqes) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Prop Set IOCQES = %u (%u bytes)\n",
cc.bits.iocqes, 1u << cc.bits.iocqes);
session->vcprop.cc.bits.iocqes = cc.bits.iocqes;
ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
diff.bits.iocqes = 0;
}
@ -514,17 +514,17 @@ nvmf_prop_set_cc(struct spdk_nvmf_session *session, uint64_t value)
}
static uint64_t
nvmf_prop_get_csts(struct spdk_nvmf_session *session)
nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
{
return session->vcprop.csts.raw;
return ctrlr->vcprop.csts.raw;
}
struct nvmf_prop {
uint32_t ofst;
uint8_t size;
char name[11];
uint64_t (*get_cb)(struct spdk_nvmf_session *session);
bool (*set_cb)(struct spdk_nvmf_session *session, uint64_t value);
uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint64_t value);
};
#define PROP(field, size, get_cb, set_cb) \
@ -559,7 +559,7 @@ find_prop(uint32_t ofst)
}
void
spdk_nvmf_property_get(struct spdk_nvmf_session *session,
spdk_nvmf_property_get(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_get_cmd *cmd,
struct spdk_nvmf_fabric_prop_get_rsp *response)
{
@ -592,12 +592,12 @@ spdk_nvmf_property_get(struct spdk_nvmf_session *session,
return;
}
response->value.u64 = prop->get_cb(session);
response->value.u64 = prop->get_cb(ctrlr);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "response value: 0x%" PRIx64 "\n", response->value.u64);
}
void
spdk_nvmf_property_set(struct spdk_nvmf_session *session,
spdk_nvmf_property_set(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_set_cmd *cmd,
struct spdk_nvme_cpl *response)
{
@ -627,7 +627,7 @@ spdk_nvmf_property_set(struct spdk_nvmf_session *session,
value = (uint32_t)value;
}
if (!prop->set_cb(session, value)) {
if (!prop->set_cb(ctrlr, value)) {
SPDK_ERRLOG("prop set_cb failed\n");
response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
return;
@ -635,27 +635,27 @@ spdk_nvmf_property_set(struct spdk_nvmf_session *session,
}
int
spdk_nvmf_session_poll(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
{
struct spdk_nvmf_conn *conn, *tmp;
struct spdk_nvmf_subsystem *subsys = session->subsys;
struct spdk_nvmf_subsystem *subsys = ctrlr->subsys;
if (subsys->is_removed) {
if (session->aer_req) {
struct spdk_nvmf_request *aer = session->aer_req;
if (ctrlr->aer_req) {
struct spdk_nvmf_request *aer = ctrlr->aer_req;
aer->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
aer->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
aer->rsp->nvme_cpl.status.dnr = 0;
spdk_nvmf_request_complete(aer);
session->aer_req = NULL;
ctrlr->aer_req = NULL;
}
}
TAILQ_FOREACH_SAFE(conn, &session->connections, link, tmp) {
TAILQ_FOREACH_SAFE(conn, &ctrlr->connections, link, tmp) {
if (conn->transport->conn_poll(conn) < 0) {
SPDK_ERRLOG("Transport poll failed for conn %p; closing connection\n", conn);
spdk_nvmf_session_disconnect(conn);
spdk_nvmf_ctrlr_disconnect(conn);
}
}
@ -663,7 +663,7 @@ spdk_nvmf_session_poll(struct spdk_nvmf_session *session)
}
int
spdk_nvmf_session_set_features_host_identifier(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
{
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
@ -673,9 +673,9 @@ spdk_nvmf_session_set_features_host_identifier(struct spdk_nvmf_request *req)
}
int
spdk_nvmf_session_get_features_host_identifier(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
@ -687,20 +687,20 @@ spdk_nvmf_session_get_features_host_identifier(struct spdk_nvmf_request *req)
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (req->data == NULL || req->length < sizeof(session->hostid)) {
if (req->data == NULL || req->length < sizeof(ctrlr->hostid)) {
SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n");
response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
memcpy(req->data, session->hostid, sizeof(session->hostid));
memcpy(req->data, ctrlr->hostid, sizeof(ctrlr->hostid));
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_session_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
@ -709,31 +709,31 @@ spdk_nvmf_session_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
if (cmd->cdw11 == 0) {
rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
} else if (cmd->cdw11 < MIN_KEEP_ALIVE_TIMEOUT) {
session->kato = MIN_KEEP_ALIVE_TIMEOUT;
ctrlr->kato = MIN_KEEP_ALIVE_TIMEOUT;
} else {
session->kato = cmd->cdw11;
ctrlr->kato = cmd->cdw11;
}
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Set Features - Keep Alive Timer set to %u ms\n", session->kato);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Set Features - Keep Alive Timer set to %u ms\n", ctrlr->kato);
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_session_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Keep Alive Timer\n");
rsp->cdw0 = session->kato;
rsp->cdw0 = ctrlr->kato;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_session_set_features_number_of_queues(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
uint32_t nr_io_queues;
@ -741,10 +741,10 @@ spdk_nvmf_session_set_features_number_of_queues(struct spdk_nvmf_request *req)
req->cmd->nvme_cmd.cdw11);
/* Extra 1 connection for Admin queue */
nr_io_queues = session->max_connections_allowed - 1;
nr_io_queues = ctrlr->max_connections_allowed - 1;
/* verify that the contoller is ready to process commands */
if (session->num_connections > 1) {
if (ctrlr->num_connections > 1) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Queue pairs already active!\n");
rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
} else {
@ -757,15 +757,15 @@ spdk_nvmf_session_set_features_number_of_queues(struct spdk_nvmf_request *req)
}
int
spdk_nvmf_session_get_features_number_of_queues(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_number_of_queues(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
uint32_t nr_io_queues;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Number of Queues\n");
nr_io_queues = session->max_connections_allowed - 1;
nr_io_queues = ctrlr->max_connections_allowed - 1;
/* Number of IO queues has a zero based value */
rsp->cdw0 = ((nr_io_queues - 1) << 16) |
@ -775,44 +775,44 @@ spdk_nvmf_session_get_features_number_of_queues(struct spdk_nvmf_request *req)
}
int
spdk_nvmf_session_set_features_async_event_configuration(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
cmd->cdw11);
session->async_event_config.raw = cmd->cdw11;
ctrlr->async_event_config.raw = cmd->cdw11;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_session_get_features_async_event_configuration(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_async_event_configuration(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Async Event Configuration\n");
rsp->cdw0 = session->async_event_config.raw;
rsp->cdw0 = ctrlr->async_event_config.raw;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_session_async_event_request(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Async Event Request\n");
assert(session->vcdata.aerl + 1 == 1);
if (session->aer_req != NULL) {
assert(ctrlr->vcdata.aerl + 1 == 1);
if (ctrlr->aer_req != NULL) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "AERL exceeded\n");
rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
session->aer_req = req;
ctrlr->aer_req = req;
return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

View File

@ -31,8 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef NVMF_SESSION_H
#define NVMF_SESSION_H
#ifndef SPDK_NVMF_CTRLR_H
#define SPDK_NVMF_CTRLR_H
#include "spdk/stdinc.h"
@ -40,7 +40,7 @@
#include "spdk/queue.h"
/* define a virtual controller limit to the number of QPs supported */
#define MAX_SESSION_IO_QUEUES 64
#define MAX_QPAIRS_PER_CTRLR 64
struct spdk_nvmf_transport;
struct spdk_nvmf_request;
@ -52,7 +52,7 @@ enum conn_type {
struct spdk_nvmf_conn {
const struct spdk_nvmf_transport *transport;
struct spdk_nvmf_session *sess;
struct spdk_nvmf_ctrlr *ctrlr;
enum conn_type type;
uint16_t qid;
@ -63,11 +63,10 @@ struct spdk_nvmf_conn {
};
/*
* This structure maintains the NVMf virtual controller session
* state. Each NVMf session permits some number of connections.
* At least one admin connection and additional IOQ connections.
* This structure represents an NVMe-oF controller,
* which is like a "session" in networking terms.
*/
struct spdk_nvmf_session {
struct spdk_nvmf_ctrlr {
uint16_t cntlid;
struct spdk_nvmf_subsystem *subsys;
@ -95,44 +94,44 @@ struct spdk_nvmf_session {
uint8_t hostid[16];
const struct spdk_nvmf_transport *transport;
TAILQ_ENTRY(spdk_nvmf_session) link;
TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};
void spdk_nvmf_session_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp);
void spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp);
struct spdk_nvmf_conn *spdk_nvmf_session_get_conn(struct spdk_nvmf_session *session, uint16_t qid);
struct spdk_nvmf_conn *spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid);
struct spdk_nvmf_request *spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid);
void
spdk_nvmf_property_get(struct spdk_nvmf_session *session,
spdk_nvmf_property_get(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_get_cmd *cmd,
struct spdk_nvmf_fabric_prop_get_rsp *response);
void
spdk_nvmf_property_set(struct spdk_nvmf_session *session,
spdk_nvmf_property_set(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_set_cmd *cmd,
struct spdk_nvme_cpl *rsp);
int spdk_nvmf_session_poll(struct spdk_nvmf_session *session);
int spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr);
void spdk_nvmf_session_destruct(struct spdk_nvmf_session *session);
void spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int spdk_nvmf_session_set_features_host_identifier(struct spdk_nvmf_request *req);
int spdk_nvmf_session_get_features_host_identifier(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req);
int spdk_nvmf_session_set_features_keep_alive_timer(struct spdk_nvmf_request *req);
int spdk_nvmf_session_get_features_keep_alive_timer(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_get_features_keep_alive_timer(struct spdk_nvmf_request *req);
int spdk_nvmf_session_set_features_number_of_queues(struct spdk_nvmf_request *req);
int spdk_nvmf_session_get_features_number_of_queues(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_get_features_number_of_queues(struct spdk_nvmf_request *req);
int spdk_nvmf_session_set_features_async_event_configuration(struct spdk_nvmf_request *req);
int spdk_nvmf_session_get_features_async_event_configuration(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_get_features_async_event_configuration(struct spdk_nvmf_request *req);
int spdk_nvmf_session_async_event_request(struct spdk_nvmf_request *req);
int spdk_nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req);
#endif
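As a usage illustration of the declarations above, this is roughly how a per-request handler reaches its controller after the rename; it mirrors spdk_nvmf_ctrlr_get_features_keep_alive_timer from ctrlr.c earlier in this commit, with req->conn->ctrlr replacing the old req->conn->sess (the handler name here is made up for the example):

#include "ctrlr.h"
#include "request.h"

static int
example_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
        /* The connection now points at a controller rather than a session. */
        struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
        struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

        /* Report the keep-alive timeout held by the controller. */
        rsp->cdw0 = ctrlr->kato;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}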

View File

@ -34,7 +34,7 @@
#include "spdk/stdinc.h"
#include "subsystem.h"
#include "session.h"
#include "ctrlr.h"
#include "request.h"
#include "spdk/bdev.h"
@ -61,12 +61,12 @@ struct __attribute__((packed)) nvme_read_cdw12 {
uint8_t lr : 1; /* limited retry */
};
static void nvmf_bdev_set_dsm(struct spdk_nvmf_session *session)
static void nvmf_bdev_set_dsm(struct spdk_nvmf_ctrlr *ctrlr)
{
uint32_t i;
for (i = 0; i < session->subsys->dev.max_nsid; i++) {
struct spdk_bdev *bdev = session->subsys->dev.ns_list[i];
for (i = 0; i < ctrlr->subsys->dev.max_nsid; i++) {
struct spdk_bdev *bdev = ctrlr->subsys->dev.ns_list[i];
if (bdev == NULL) {
continue;
@ -81,40 +81,40 @@ static void nvmf_bdev_set_dsm(struct spdk_nvmf_session *session)
}
SPDK_TRACELOG(SPDK_TRACE_NVMF, "All devices in Subsystem %s support unmap - enabling DSM\n",
spdk_nvmf_subsystem_get_nqn(session->subsys));
session->vcdata.oncs.dsm = 1;
spdk_nvmf_subsystem_get_nqn(ctrlr->subsys));
ctrlr->vcdata.oncs.dsm = 1;
}
static void
nvmf_bdev_ctrlr_get_data(struct spdk_nvmf_session *session)
nvmf_bdev_ctrlr_get_data(struct spdk_nvmf_ctrlr *ctrlr)
{
struct spdk_nvmf_subsystem *subsys = session->subsys;
struct spdk_nvmf_subsystem *subsys = ctrlr->subsys;
memset(&session->vcdata, 0, sizeof(struct spdk_nvme_ctrlr_data));
spdk_strcpy_pad(session->vcdata.fr, FW_VERSION, sizeof(session->vcdata.fr), ' ');
spdk_strcpy_pad(session->vcdata.mn, MODEL_NUMBER, sizeof(session->vcdata.mn), ' ');
spdk_strcpy_pad(session->vcdata.sn, spdk_nvmf_subsystem_get_sn(subsys),
sizeof(session->vcdata.sn), ' ');
session->vcdata.rab = 6;
session->vcdata.ver.bits.mjr = 1;
session->vcdata.ver.bits.mnr = 2;
session->vcdata.ver.bits.ter = 1;
session->vcdata.ctratt.host_id_exhid_supported = 1;
session->vcdata.aerl = 0;
session->vcdata.frmw.slot1_ro = 1;
session->vcdata.frmw.num_slots = 1;
session->vcdata.lpa.edlp = 1;
session->vcdata.elpe = 127;
session->vcdata.sqes.min = 0x06;
session->vcdata.sqes.max = 0x06;
session->vcdata.cqes.min = 0x04;
session->vcdata.cqes.max = 0x04;
session->vcdata.maxcmd = 1024;
session->vcdata.nn = subsys->dev.max_nsid;
session->vcdata.vwc.present = 1;
session->vcdata.sgls.supported = 1;
strncpy(session->vcdata.subnqn, session->subsys->subnqn, sizeof(session->vcdata.subnqn));
nvmf_bdev_set_dsm(session);
memset(&ctrlr->vcdata, 0, sizeof(struct spdk_nvme_ctrlr_data));
spdk_strcpy_pad(ctrlr->vcdata.fr, FW_VERSION, sizeof(ctrlr->vcdata.fr), ' ');
spdk_strcpy_pad(ctrlr->vcdata.mn, MODEL_NUMBER, sizeof(ctrlr->vcdata.mn), ' ');
spdk_strcpy_pad(ctrlr->vcdata.sn, spdk_nvmf_subsystem_get_sn(subsys),
sizeof(ctrlr->vcdata.sn), ' ');
ctrlr->vcdata.rab = 6;
ctrlr->vcdata.ver.bits.mjr = 1;
ctrlr->vcdata.ver.bits.mnr = 2;
ctrlr->vcdata.ver.bits.ter = 1;
ctrlr->vcdata.ctratt.host_id_exhid_supported = 1;
ctrlr->vcdata.aerl = 0;
ctrlr->vcdata.frmw.slot1_ro = 1;
ctrlr->vcdata.frmw.num_slots = 1;
ctrlr->vcdata.lpa.edlp = 1;
ctrlr->vcdata.elpe = 127;
ctrlr->vcdata.sqes.min = 0x06;
ctrlr->vcdata.sqes.max = 0x06;
ctrlr->vcdata.cqes.min = 0x04;
ctrlr->vcdata.cqes.max = 0x04;
ctrlr->vcdata.maxcmd = 1024;
ctrlr->vcdata.nn = subsys->dev.max_nsid;
ctrlr->vcdata.vwc.present = 1;
ctrlr->vcdata.sgls.supported = 1;
strncpy(ctrlr->vcdata.subnqn, ctrlr->subsys->subnqn, sizeof(ctrlr->vcdata.subnqn));
nvmf_bdev_set_dsm(ctrlr);
}
static void
@ -211,9 +211,9 @@ identify_ns(struct spdk_nvmf_subsystem *subsystem,
}
static int
identify_ctrlr(struct spdk_nvmf_session *session, struct spdk_nvme_ctrlr_data *cdata)
identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
{
*cdata = session->vcdata;
*cdata = ctrlr->vcdata;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
@ -253,10 +253,10 @@ static int
nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
{
uint8_t cns;
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_subsystem *subsystem = session->subsys;
struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
if (req->data == NULL || req->length < 4096) {
SPDK_ERRLOG("identify command with invalid buffer\n");
@ -271,7 +271,7 @@ nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
case SPDK_NVME_IDENTIFY_NS:
return identify_ns(subsystem, cmd, rsp, req->data);
case SPDK_NVME_IDENTIFY_CTRLR:
return identify_ctrlr(session, req->data);
return identify_ctrlr(ctrlr, req->data);
case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST:
return identify_active_ns_list(subsystem, cmd, rsp, req->data);
default:
@ -284,7 +284,7 @@ nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
static int
nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
uint32_t cdw10 = cmd->cdw10;
@ -297,7 +297,7 @@ nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
rsp->cdw0 = 1; /* Command not aborted */
conn = spdk_nvmf_session_get_conn(session, sqid);
conn = spdk_nvmf_ctrlr_get_conn(ctrlr, sqid);
if (conn == NULL) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "sqid %u not found\n", sqid);
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
@ -306,7 +306,7 @@ nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
}
/*
* NOTE: This relies on the assumption that all connections for a session will be handled
* NOTE: This relies on the assumption that all connections for a ctrlr will be handled
* on the same thread. If this assumption becomes untrue, this will need to pass a message
* to the thread handling conn, and the abort will need to be asynchronous.
*/
@ -319,8 +319,8 @@ nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
}
if (spdk_nvmf_request_abort(req_to_abort) == 0) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "abort session=%p req=%p sqid=%u cid=%u successful\n",
session, req_to_abort, sqid, cid);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "abort ctrlr=%p req=%p sqid=%u cid=%u successful\n",
ctrlr, req_to_abort, sqid, cid);
rsp->cdw0 = 0; /* Command successfully aborted */
}
rsp->status.sct = SPDK_NVME_SCT_GENERIC;
@ -338,16 +338,16 @@ nvmf_bdev_ctrlr_get_features(struct spdk_nvmf_request *req)
feature = cmd->cdw10 & 0xff; /* mask out the FID value */
switch (feature) {
case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
return spdk_nvmf_session_get_features_number_of_queues(req);
return spdk_nvmf_ctrlr_get_features_number_of_queues(req);
case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
response->cdw0 = 1;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
return spdk_nvmf_session_get_features_keep_alive_timer(req);
return spdk_nvmf_ctrlr_get_features_keep_alive_timer(req);
case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
return spdk_nvmf_session_get_features_async_event_configuration(req);
return spdk_nvmf_ctrlr_get_features_async_event_configuration(req);
case SPDK_NVME_FEAT_HOST_IDENTIFIER:
return spdk_nvmf_session_get_features_host_identifier(req);
return spdk_nvmf_ctrlr_get_features_host_identifier(req);
default:
SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature);
response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
@ -365,13 +365,13 @@ nvmf_bdev_ctrlr_set_features(struct spdk_nvmf_request *req)
feature = cmd->cdw10 & 0xff; /* mask out the FID value */
switch (feature) {
case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
return spdk_nvmf_session_set_features_number_of_queues(req);
return spdk_nvmf_ctrlr_set_features_number_of_queues(req);
case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
return spdk_nvmf_session_set_features_keep_alive_timer(req);
return spdk_nvmf_ctrlr_set_features_keep_alive_timer(req);
case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
return spdk_nvmf_session_set_features_async_event_configuration(req);
return spdk_nvmf_ctrlr_set_features_async_event_configuration(req);
case SPDK_NVME_FEAT_HOST_IDENTIFIER:
return spdk_nvmf_session_set_features_host_identifier(req);
return spdk_nvmf_ctrlr_set_features_host_identifier(req);
default:
SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature);
response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
@ -400,18 +400,17 @@ nvmf_bdev_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
case SPDK_NVME_OPC_SET_FEATURES:
return nvmf_bdev_ctrlr_set_features(req);
case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
return spdk_nvmf_session_async_event_request(req);
return spdk_nvmf_ctrlr_async_event_request(req);
case SPDK_NVME_OPC_KEEP_ALIVE:
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Keep Alive\n");
/*
* To handle keep alive just clear or reset the
* session based keep alive duration counter.
* ctrlr based keep alive duration counter.
* When added, a separate timer based process
* will monitor if the time since last recorded
* keep alive has exceeded the max duration and
* take appropriate action.
*/
//session->keep_alive_timestamp = ;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
case SPDK_NVME_OPC_CREATE_IO_SQ:
@ -610,7 +609,7 @@ nvmf_bdev_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
struct spdk_bdev *bdev;
struct spdk_bdev_desc *desc;
struct spdk_io_channel *ch;
struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;
struct spdk_nvmf_subsystem *subsystem = req->conn->ctrlr->subsys;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

View File

@ -38,7 +38,7 @@
#include "spdk/stdinc.h"
#include "nvmf_internal.h"
#include "session.h"
#include "ctrlr.h"
#include "subsystem.h"
#include "request.h"
#include "transport.h"
@ -155,7 +155,7 @@ nvmf_get_log_page_len(struct spdk_nvme_cmd *cmd)
static int
nvmf_discovery_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
uint64_t log_page_offset;
@ -175,7 +175,7 @@ nvmf_discovery_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
/* Only identify controller can be supported */
if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_CTRLR) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Identify Controller\n");
memcpy(req->data, (char *)&session->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
memcpy(req->data, (char *)&ctrlr->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
} else {
SPDK_ERRLOG("Unsupported identify command\n");
@ -225,7 +225,7 @@ nvmf_discovery_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
}
static void
nvmf_discovery_ctrlr_get_data(struct spdk_nvmf_session *session)
nvmf_discovery_ctrlr_get_data(struct spdk_nvmf_ctrlr *ctrlr)
{
}

View File

@ -49,12 +49,12 @@ SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
struct spdk_nvmf_tgt g_nvmf_tgt;
int
spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_ctrlr,
uint32_t in_capsule_data_size, uint32_t max_io_size)
{
int rc;
g_nvmf_tgt.max_queues_per_session = max_queues_per_sess;
g_nvmf_tgt.max_queues_per_ctrlr = max_queues_per_ctrlr;
g_nvmf_tgt.max_queue_depth = max_queue_depth;
g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
g_nvmf_tgt.max_io_size = max_io_size;
@ -65,7 +65,7 @@ spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_sess,
TAILQ_INIT(&g_nvmf_tgt.subsystems);
TAILQ_INIT(&g_nvmf_tgt.listen_addrs);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Session: %d\n", max_queues_per_sess);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Controller: %d\n", max_queues_per_ctrlr);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);

View File

@ -42,7 +42,7 @@
#include "spdk/queue.h"
#include "spdk/util.h"
#define SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE 1
#define SPDK_NVMF_DEFAULT_NUM_CTRLRS_PER_LCORE 1
struct spdk_nvmf_ctrlr_ops {
/**
@ -53,7 +53,7 @@ struct spdk_nvmf_ctrlr_ops {
/**
* Get NVMe identify controller data.
*/
void (*ctrlr_get_data)(struct spdk_nvmf_session *session);
void (*ctrlr_get_data)(struct spdk_nvmf_ctrlr *ctrlr);
/**
* Process admin command.
@ -78,7 +78,7 @@ struct spdk_nvmf_ctrlr_ops {
struct spdk_nvmf_tgt {
uint16_t max_queue_depth;
uint16_t max_queues_per_session;
uint16_t max_queues_per_ctrlr;
uint32_t in_capsule_data_size;
uint32_t max_io_size;
uint64_t discovery_genctr;

View File

@ -39,7 +39,7 @@
#include "nvmf_internal.h"
#include "request.h"
#include "session.h"
#include "ctrlr.h"
#include "subsystem.h"
#include "transport.h"
@ -166,8 +166,8 @@ struct spdk_nvmf_rdma_conn {
/* List of RDMA connections that have not yet received a CONNECT capsule */
static TAILQ_HEAD(, spdk_nvmf_rdma_conn) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns);
struct spdk_nvmf_rdma_session {
struct spdk_nvmf_session session;
struct spdk_nvmf_rdma_ctrlr {
struct spdk_nvmf_ctrlr ctrlr;
SLIST_HEAD(, spdk_nvmf_rdma_buf) data_buf_pool;
@ -217,11 +217,11 @@ get_rdma_req(struct spdk_nvmf_request *req)
req));
}
static inline struct spdk_nvmf_rdma_session *
get_rdma_sess(struct spdk_nvmf_session *sess)
static inline struct spdk_nvmf_rdma_ctrlr *
get_rdma_ctrlr(struct spdk_nvmf_ctrlr *ctrlr)
{
return (struct spdk_nvmf_rdma_session *)((uintptr_t)sess - offsetof(struct spdk_nvmf_rdma_session,
session));
return (struct spdk_nvmf_rdma_ctrlr *)((uintptr_t)ctrlr - offsetof(struct spdk_nvmf_rdma_ctrlr,
ctrlr));
}
static void
@ -659,7 +659,7 @@ static int
nvmf_rdma_disconnect(struct rdma_cm_event *evt)
{
struct spdk_nvmf_conn *conn;
struct spdk_nvmf_session *session;
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_subsystem *subsystem;
struct spdk_nvmf_rdma_conn *rdma_conn;
@ -678,16 +678,16 @@ nvmf_rdma_disconnect(struct rdma_cm_event *evt)
rdma_conn = get_rdma_conn(conn);
session = conn->sess;
if (session == NULL) {
/* No session has been established yet. That means the conn
ctrlr = conn->ctrlr;
if (ctrlr == NULL) {
/* No ctrlr has been established yet. That means the conn
* must be in the pending connections list. Remove it. */
TAILQ_REMOVE(&g_pending_conns, rdma_conn, link);
spdk_nvmf_rdma_conn_destroy(rdma_conn);
return 0;
}
subsystem = session->subsys;
subsystem = ctrlr->subsys;
subsystem->disconnect_cb(subsystem->cb_ctx, conn);
@ -728,7 +728,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
struct spdk_nvmf_rdma_session *rdma_sess;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvme_sgl_descriptor *sgl;
req->length = 0;
@ -779,9 +779,9 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
rdma_sess = get_rdma_sess(req->conn->sess);
if (!rdma_sess) {
/* The only time a connection won't have a session
rdma_ctrlr = get_rdma_ctrlr(req->conn->ctrlr);
if (!rdma_ctrlr) {
/* The only time a connection won't have a ctrlr
* is when this is the CONNECT request.
*/
assert(cmd->opc == SPDK_NVME_OPC_FABRIC);
@ -794,8 +794,8 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
rdma_req->data.sgl[0].lkey = get_rdma_conn(req->conn)->bufs_mr->lkey;
rdma_req->data_from_pool = false;
} else {
req->data = SLIST_FIRST(&rdma_sess->data_buf_pool);
rdma_req->data.sgl[0].lkey = rdma_sess->buf_mr->lkey;
req->data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
rdma_req->data.sgl[0].lkey = rdma_ctrlr->buf_mr->lkey;
rdma_req->data_from_pool = true;
if (!req->data) {
/* No available buffers. Queue this request up. */
@ -806,7 +806,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request %p took buffer from central pool\n", req);
SLIST_REMOVE_HEAD(&rdma_sess->data_buf_pool, link);
SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link);
}
rdma_req->data.sgl[0].addr = (uintptr_t)req->data;
@ -860,21 +860,21 @@ static int
spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_conn *conn)
{
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn);
struct spdk_nvmf_rdma_session *rdma_sess;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_request *rdma_req, *tmp;
int rc;
int count = 0;
/* First, try to assign free data buffers to requests that need one */
if (conn->sess) {
rdma_sess = get_rdma_sess(conn->sess);
if (conn->ctrlr) {
rdma_ctrlr = get_rdma_ctrlr(conn->ctrlr);
TAILQ_FOREACH_SAFE(rdma_req, &rdma_conn->pending_data_buf_queue, link, tmp) {
assert(rdma_req->req.data == NULL);
rdma_req->req.data = SLIST_FIRST(&rdma_sess->data_buf_pool);
rdma_req->req.data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
if (!rdma_req->req.data) {
break;
}
SLIST_REMOVE_HEAD(&rdma_sess->data_buf_pool, link);
SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link);
rdma_req->data.sgl[0].addr = (uintptr_t)rdma_req->req.data;
TAILQ_REMOVE(&rdma_conn->pending_data_buf_queue, rdma_req, link);
if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
@ -1191,65 +1191,65 @@ spdk_nvmf_rdma_discover(struct spdk_nvmf_listen_addr *listen_addr,
entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
}
static struct spdk_nvmf_session *
spdk_nvmf_rdma_session_init(void)
static struct spdk_nvmf_ctrlr *
spdk_nvmf_rdma_ctrlr_init(void)
{
struct spdk_nvmf_rdma_session *rdma_sess;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
int i;
struct spdk_nvmf_rdma_buf *buf;
rdma_sess = calloc(1, sizeof(*rdma_sess));
if (!rdma_sess) {
rdma_ctrlr = calloc(1, sizeof(*rdma_ctrlr));
if (!rdma_ctrlr) {
return NULL;
}
/* TODO: Make the number of elements in this pool configurable. For now, one full queue
* worth seems reasonable.
*/
rdma_sess->buf = spdk_dma_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
0x20000, NULL);
if (!rdma_sess->buf) {
rdma_ctrlr->buf = spdk_dma_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
0x20000, NULL);
if (!rdma_ctrlr->buf) {
SPDK_ERRLOG("Large buffer pool allocation failed (%d x %d)\n",
g_rdma.max_queue_depth, g_rdma.max_io_size);
free(rdma_sess);
free(rdma_ctrlr);
return NULL;
}
SLIST_INIT(&rdma_sess->data_buf_pool);
SLIST_INIT(&rdma_ctrlr->data_buf_pool);
for (i = 0; i < g_rdma.max_queue_depth; i++) {
buf = (struct spdk_nvmf_rdma_buf *)(rdma_sess->buf + (i * g_rdma.max_io_size));
SLIST_INSERT_HEAD(&rdma_sess->data_buf_pool, buf, link);
buf = (struct spdk_nvmf_rdma_buf *)(rdma_ctrlr->buf + (i * g_rdma.max_io_size));
SLIST_INSERT_HEAD(&rdma_ctrlr->data_buf_pool, buf, link);
}
rdma_sess->session.transport = &spdk_nvmf_transport_rdma;
rdma_ctrlr->ctrlr.transport = &spdk_nvmf_transport_rdma;
return &rdma_sess->session;
return &rdma_ctrlr->ctrlr;
}
static void
spdk_nvmf_rdma_session_fini(struct spdk_nvmf_session *session)
spdk_nvmf_rdma_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
{
struct spdk_nvmf_rdma_session *rdma_sess = get_rdma_sess(session);
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr = get_rdma_ctrlr(ctrlr);
if (!rdma_sess) {
if (!rdma_ctrlr) {
return;
}
ibv_dereg_mr(rdma_sess->buf_mr);
spdk_dma_free(rdma_sess->buf);
free(rdma_sess);
ibv_dereg_mr(rdma_ctrlr->buf_mr);
spdk_dma_free(rdma_ctrlr->buf);
free(rdma_ctrlr);
}
static int
spdk_nvmf_rdma_session_add_conn(struct spdk_nvmf_session *session,
struct spdk_nvmf_conn *conn)
spdk_nvmf_rdma_ctrlr_add_conn(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_conn *conn)
{
struct spdk_nvmf_rdma_session *rdma_sess = get_rdma_sess(session);
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr = get_rdma_ctrlr(ctrlr);
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn);
if (rdma_sess->verbs != NULL) {
if (rdma_sess->verbs != rdma_conn->cm_id->verbs) {
SPDK_ERRLOG("Two connections belonging to the same session cannot connect using different RDMA devices.\n");
if (rdma_ctrlr->verbs != NULL) {
if (rdma_ctrlr->verbs != rdma_conn->cm_id->verbs) {
SPDK_ERRLOG("Two connections belonging to the same ctrlr cannot connect using different RDMA devices.\n");
return -1;
}
@ -1257,28 +1257,28 @@ spdk_nvmf_rdma_session_add_conn(struct spdk_nvmf_session *session,
return 0;
}
rdma_sess->verbs = rdma_conn->cm_id->verbs;
rdma_sess->buf_mr = ibv_reg_mr(rdma_conn->cm_id->pd, rdma_sess->buf,
g_rdma.max_queue_depth * g_rdma.max_io_size,
IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE);
if (!rdma_sess->buf_mr) {
rdma_ctrlr->verbs = rdma_conn->cm_id->verbs;
rdma_ctrlr->buf_mr = ibv_reg_mr(rdma_conn->cm_id->pd, rdma_ctrlr->buf,
g_rdma.max_queue_depth * g_rdma.max_io_size,
IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE);
if (!rdma_ctrlr->buf_mr) {
SPDK_ERRLOG("Large buffer pool registration failed (%d x %d)\n",
g_rdma.max_queue_depth, g_rdma.max_io_size);
spdk_dma_free(rdma_sess->buf);
free(rdma_sess);
spdk_dma_free(rdma_ctrlr->buf);
free(rdma_ctrlr);
return -1;
}
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Session Shared Data Pool: %p Length: %x LKey: %x\n",
rdma_sess->buf, g_rdma.max_queue_depth * g_rdma.max_io_size, rdma_sess->buf_mr->lkey);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Controller session Shared Data Pool: %p Length: %x LKey: %x\n",
rdma_ctrlr->buf, g_rdma.max_queue_depth * g_rdma.max_io_size, rdma_ctrlr->buf_mr->lkey);
return 0;
}
static int
spdk_nvmf_rdma_session_remove_conn(struct spdk_nvmf_session *session,
struct spdk_nvmf_conn *conn)
spdk_nvmf_rdma_ctrlr_remove_conn(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_conn *conn)
{
return 0;
}
@ -1304,15 +1304,15 @@ request_release_buffer(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
struct spdk_nvmf_conn *conn = req->conn;
struct spdk_nvmf_rdma_session *rdma_sess;
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_buf *buf;
if (rdma_req->data_from_pool) {
/* Put the buffer back in the pool */
rdma_sess = get_rdma_sess(conn->sess);
rdma_ctrlr = get_rdma_ctrlr(conn->ctrlr);
buf = req->data;
SLIST_INSERT_HEAD(&rdma_sess->data_buf_pool, buf, link);
SLIST_INSERT_HEAD(&rdma_ctrlr->data_buf_pool, buf, link);
req->data = NULL;
req->length = 0;
rdma_req->data_from_pool = false;
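
A minimal standalone sketch of the per-ctrlr data buffer pool pattern used above: one contiguous allocation is carved into fixed-size buffers that are threaded onto an SLIST at init time and pushed back onto the list when a request releases its buffer. Plain calloc() stands in for spdk_dma_malloc()/ibv_reg_mr(), and the pool_* helper names are hypothetical.

/* Carve one allocation into fixed-size buffers tracked on a free list. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct buf_entry {
	SLIST_ENTRY(buf_entry) link;
};

struct buf_pool {
	void *base;                        /* single contiguous allocation */
	SLIST_HEAD(, buf_entry) free_bufs; /* free buffers carved from base */
};

static int
pool_init(struct buf_pool *pool, size_t num_bufs, size_t buf_size)
{
	size_t i;

	pool->base = calloc(num_bufs, buf_size);
	if (pool->base == NULL) {
		return -1;
	}
	SLIST_INIT(&pool->free_bufs);
	for (i = 0; i < num_bufs; i++) {
		struct buf_entry *buf =
			(struct buf_entry *)((char *)pool->base + i * buf_size);
		SLIST_INSERT_HEAD(&pool->free_bufs, buf, link);
	}
	return 0;
}

static void *
pool_get(struct buf_pool *pool)
{
	struct buf_entry *buf = SLIST_FIRST(&pool->free_bufs);

	if (buf != NULL) {
		SLIST_REMOVE_HEAD(&pool->free_bufs, link);
	}
	return buf;
}

static void
pool_put(struct buf_pool *pool, void *data)
{
	/* Mirrors request_release_buffer(): push the buffer back on the list. */
	SLIST_INSERT_HEAD(&pool->free_bufs, (struct buf_entry *)data, link);
}

int
main(void)
{
	struct buf_pool pool;

	if (pool_init(&pool, 4, 4096) != 0) {
		return 1;
	}
	void *buf = pool_get(&pool);
	printf("got %p\n", buf);
	pool_put(&pool, buf);
	free(pool.base);
	return 0;
}
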
@ -1585,10 +1585,10 @@ const struct spdk_nvmf_transport spdk_nvmf_transport_rdma = {
.listen_addr_remove = spdk_nvmf_rdma_listen_remove,
.listen_addr_discover = spdk_nvmf_rdma_discover,
.session_init = spdk_nvmf_rdma_session_init,
.session_fini = spdk_nvmf_rdma_session_fini,
.session_add_conn = spdk_nvmf_rdma_session_add_conn,
.session_remove_conn = spdk_nvmf_rdma_session_remove_conn,
.ctrlr_init = spdk_nvmf_rdma_ctrlr_init,
.ctrlr_fini = spdk_nvmf_rdma_ctrlr_fini,
.ctrlr_add_conn = spdk_nvmf_rdma_ctrlr_add_conn,
.ctrlr_remove_conn = spdk_nvmf_rdma_ctrlr_remove_conn,
.req_complete = spdk_nvmf_rdma_request_complete,

View File

@ -35,7 +35,7 @@
#include "nvmf_internal.h"
#include "request.h"
#include "session.h"
#include "ctrlr.h"
#include "subsystem.h"
#include "transport.h"
@ -76,7 +76,7 @@ nvmf_process_property_get(struct spdk_nvmf_request *req)
cmd = &req->cmd->prop_get_cmd;
response = &req->rsp->prop_get_rsp;
spdk_nvmf_property_get(req->conn->sess, cmd, response);
spdk_nvmf_property_get(req->conn->ctrlr, cmd, response);
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
@ -88,7 +88,7 @@ nvmf_process_property_set(struct spdk_nvmf_request *req)
cmd = &req->cmd->prop_set_cmd;
spdk_nvmf_property_set(req->conn->sess, cmd, &req->rsp->nvme_cpl);
spdk_nvmf_property_set(req->conn->ctrlr, cmd, &req->rsp->nvme_cpl);
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
@ -102,7 +102,7 @@ spdk_nvmf_handle_connect(struct spdk_nvmf_request *req)
struct spdk_nvmf_fabric_connect_rsp *response = &req->rsp->connect_rsp;
struct spdk_nvmf_conn *conn = req->conn;
spdk_nvmf_session_connect(conn, connect, connect_data, response);
spdk_nvmf_ctrlr_connect(conn, connect, connect_data, response);
SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
response->status_code_specific.success.cntlid);
@ -187,8 +187,8 @@ nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
cap_hdr = &req->cmd->nvmf_cmd;
if (conn->sess == NULL) {
/* No session established yet; the only valid command is Connect */
if (conn->ctrlr == NULL) {
/* No ctrlr established yet; the only valid command is Connect */
if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
return nvmf_process_connect(req);
} else {
@ -199,7 +199,7 @@ nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
}
} else if (conn->type == CONN_TYPE_AQ) {
/*
* Session is established, and this is an admin queue.
* Controller session is established, and this is an admin queue.
* Disallow Connect and allow other fabrics commands.
*/
switch (cap_hdr->fctype) {
@ -214,7 +214,7 @@ nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
} else {
/* Session is established, and this is an I/O queue */
/* Controller session is established, and this is an I/O queue */
/* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */
SPDK_TRACELOG(SPDK_TRACE_NVMF, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
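
Condensing the branching above: whether a fabrics command may be processed depends only on whether a ctrlr has been associated with the connection yet and which queue type the connection is. A standalone restatement for reference; fabrics_cmd_allowed() is illustrative and does not exist in the tree.

/* Illustrative restatement of the fabrics command dispatch rules. */
#include <stdbool.h>
#include <stdint.h>

#include "spdk/nvmf_spec.h" /* assumed home of the SPDK_NVMF_FABRIC_COMMAND_* values */

static bool
fabrics_cmd_allowed(bool ctrlr_exists, bool is_admin_queue, uint8_t fctype)
{
	if (!ctrlr_exists) {
		/* No ctrlr yet: only Connect is valid. */
		return fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	}
	if (is_admin_queue) {
		/* Admin queue: Connect is rejected, property get/set are handled. */
		return fctype == SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET ||
		       fctype == SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET;
	}
	/* I/O queue: no fabrics commands beyond Connect are implemented. */
	return false;
}
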
@ -269,7 +269,7 @@ nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_session *session = req->conn->sess;
struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
spdk_nvmf_request_exec_status status;
@ -278,7 +278,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
status = nvmf_process_fabrics_command(req);
} else if (session == NULL || !session->vcprop.cc.bits.en) {
} else if (ctrlr == NULL || !ctrlr->vcprop.cc.bits.en) {
/* Only Fabric commands are allowed when the controller is disabled */
SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
@ -286,7 +286,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
} else {
struct spdk_nvmf_subsystem *subsystem;
subsystem = session->subsys;
subsystem = ctrlr->subsys;
assert(subsystem != NULL);
if (subsystem->is_removed) {

View File

@ -34,7 +34,7 @@
#include "spdk/stdinc.h"
#include "nvmf_internal.h"
#include "session.h"
#include "ctrlr.h"
#include "subsystem.h"
#include "transport.h"
@ -67,11 +67,11 @@ struct spdk_nvmf_subsystem *
spdk_nvmf_find_subsystem_with_cntlid(uint16_t cntlid)
{
struct spdk_nvmf_subsystem *subsystem;
struct spdk_nvmf_session *session;
struct spdk_nvmf_ctrlr *ctrlr;
TAILQ_FOREACH(subsystem, &g_nvmf_tgt.subsystems, entries) {
TAILQ_FOREACH(session, &subsystem->sessions, link) {
if (session->cntlid == cntlid) {
TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
if (ctrlr->cntlid == cntlid) {
return subsystem;
}
}
@ -112,12 +112,12 @@ spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem)
static bool
nvmf_subsystem_removable(struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_session *session;
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_conn *conn;
if (subsystem->is_removed) {
TAILQ_FOREACH(session, &subsystem->sessions, link) {
TAILQ_FOREACH(conn, &session->connections, link) {
TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
TAILQ_FOREACH(conn, &ctrlr->connections, link) {
if (!conn->transport->conn_is_idle(conn)) {
return false;
}
@ -131,16 +131,16 @@ nvmf_subsystem_removable(struct spdk_nvmf_subsystem *subsystem)
void
spdk_nvmf_subsystem_poll(struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_session *session;
struct spdk_nvmf_ctrlr *ctrlr;
/* Check the backing physical device for completions. */
if (subsystem->ops->poll_for_completions) {
subsystem->ops->poll_for_completions(subsystem);
}
TAILQ_FOREACH(session, &subsystem->sessions, link) {
/* For each connection in the session, check for completions */
spdk_nvmf_session_poll(session);
TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
/* For each connection in the ctrlr, check for completions */
spdk_nvmf_ctrlr_poll(ctrlr);
}
if (nvmf_subsystem_removable(subsystem)) {
@ -204,7 +204,7 @@ spdk_nvmf_create_subsystem(const char *nqn,
snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
TAILQ_INIT(&subsystem->allowed_listeners);
TAILQ_INIT(&subsystem->hosts);
TAILQ_INIT(&subsystem->sessions);
TAILQ_INIT(&subsystem->ctrlrs);
if (type == SPDK_NVMF_SUBTYPE_DISCOVERY) {
subsystem->ops = &spdk_nvmf_discovery_ctrlr_ops;
@ -223,7 +223,7 @@ spdk_nvmf_delete_subsystem(struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_subsystem_allowed_listener *allowed_listener, *allowed_listener_tmp;
struct spdk_nvmf_host *host, *host_tmp;
struct spdk_nvmf_session *session, *session_tmp;
struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
if (!subsystem) {
return;
@ -244,8 +244,8 @@ spdk_nvmf_delete_subsystem(struct spdk_nvmf_subsystem *subsystem)
free(host);
}
TAILQ_FOREACH_SAFE(session, &subsystem->sessions, link, session_tmp) {
spdk_nvmf_session_destruct(session);
TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
spdk_nvmf_ctrlr_destruct(ctrlr);
}
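
The _SAFE iteration matters here because spdk_nvmf_ctrlr_destruct() presumably unlinks and frees the element the loop is standing on. A generic illustration of the idiom; the item/destroy_all names are hypothetical, and spdk/queue.h is assumed to supply TAILQ_FOREACH_SAFE on platforms whose sys/queue.h lacks it.

/* TAILQ_FOREACH_SAFE caches the next pointer so the current node can be freed. */
#include <stdlib.h>

#include "spdk/queue.h"

struct item {
	TAILQ_ENTRY(item) link;
};

TAILQ_HEAD(item_list, item);

static void
destroy_all(struct item_list *list)
{
	struct item *it, *tmp;

	TAILQ_FOREACH_SAFE(it, list, link, tmp) {
		TAILQ_REMOVE(list, it, link);
		free(it); /* safe: tmp already holds the next element */
	}
}
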
if (subsystem->ops->detach) {

View File

@ -82,24 +82,24 @@ struct spdk_nvmf_transport {
struct spdk_nvmf_discovery_log_page_entry *entry);
/**
* Create a new session
* Create a new ctrlr
*/
struct spdk_nvmf_session *(*session_init)(void);
struct spdk_nvmf_ctrlr *(*ctrlr_init)(void);
/**
* Destroy a session
* Destroy a ctrlr
*/
void (*session_fini)(struct spdk_nvmf_session *session);
void (*ctrlr_fini)(struct spdk_nvmf_ctrlr *ctrlr);
/**
* Add a connection to a session
* Add a connection to a ctrlr
*/
int (*session_add_conn)(struct spdk_nvmf_session *session, struct spdk_nvmf_conn *conn);
int (*ctrlr_add_conn)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn);
/**
* Remove a connection from a session
* Remove a connection from a ctrlr
*/
int (*session_remove_conn)(struct spdk_nvmf_session *session, struct spdk_nvmf_conn *conn);
int (*ctrlr_remove_conn)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn);
/*
* Signal request completion, which sends a response
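
Taken together, these renamed hooks are what each transport registers with the core. The sketch below is a hypothetical do-nothing transport wired against them; the noop_* names and spdk_nvmf_transport_noop are invented for illustration, and the sketch assumes ctrlr.h and transport.h provide the definitions shown in this change.

/* Hypothetical transport implementing only the renamed ctrlr callbacks. */
#include <stdlib.h>

#include "ctrlr.h"     /* assumed to define struct spdk_nvmf_ctrlr */
#include "transport.h" /* struct spdk_nvmf_transport as shown above */

extern const struct spdk_nvmf_transport spdk_nvmf_transport_noop;

struct noop_ctrlr {
	struct spdk_nvmf_ctrlr ctrlr; /* generic part, handed back to the core */
	/* transport-private state would follow here */
};

static struct spdk_nvmf_ctrlr *
noop_ctrlr_init(void)
{
	struct noop_ctrlr *nc = calloc(1, sizeof(*nc));

	if (nc == NULL) {
		return NULL;
	}
	nc->ctrlr.transport = &spdk_nvmf_transport_noop;
	return &nc->ctrlr;
}

static void
noop_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
{
	/* ctrlr is the first member, so the cast recovers the wrapper. */
	free((struct noop_ctrlr *)ctrlr);
}

static int
noop_ctrlr_add_conn(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn)
{
	return 0; /* nothing per-connection to set up in this sketch */
}

static int
noop_ctrlr_remove_conn(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn)
{
	return 0;
}

const struct spdk_nvmf_transport spdk_nvmf_transport_noop = {
	.ctrlr_init        = noop_ctrlr_init,
	.ctrlr_fini        = noop_ctrlr_fini,
	.ctrlr_add_conn    = noop_ctrlr_add_conn,
	.ctrlr_remove_conn = noop_ctrlr_remove_conn,
};
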

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = request.c session.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
DIRS-y = request.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
.PHONY: all clean $(DIRS-y)

test/unit/lib/nvmf/ctrlr.c/.gitignore (new file)
View File

@ -0,0 +1 @@
ctrlr_ut

View File

@ -33,6 +33,6 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = session_ut.c
TEST_FILE = ctrlr_ut.c
include $(SPDK_ROOT_DIR)/mk/nvmf.unittest.mk

View File

@ -35,7 +35,7 @@
#include "spdk_cunit.h"
#include "session.c"
#include "ctrlr.c"
SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)

View File

@ -41,7 +41,7 @@
SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
struct spdk_nvmf_conn *
spdk_nvmf_session_get_conn(struct spdk_nvmf_session *session, uint16_t qid)
spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
{
return NULL;
}
@ -53,53 +53,53 @@ spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid)
}
int
spdk_nvmf_session_get_features_number_of_queues(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_number_of_queues(struct spdk_nvmf_request *req)
{
return -1;
}
int spdk_nvmf_session_set_features_number_of_queues(struct spdk_nvmf_request *req)
int spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_set_features_host_identifier(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_get_features_host_identifier(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_set_features_async_event_configuration(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_get_features_async_event_configuration(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_get_features_async_event_configuration(struct spdk_nvmf_request *req)
{
return -1;
}
int
spdk_nvmf_session_async_event_request(struct spdk_nvmf_request *req)
spdk_nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
{
return -1;
}

View File

@ -132,12 +132,12 @@ spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
}
void
spdk_nvmf_session_destruct(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
}
int
spdk_nvmf_session_poll(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
{
return -1;
}
@ -150,7 +150,7 @@ test_process_discovery_cmd(void)
/* random request length value for testing */
int req_length = 122;
struct spdk_nvmf_conn req_conn = {};
struct spdk_nvmf_session req_sess = {};
struct spdk_nvmf_ctrlr req_ctrlr = {};
struct spdk_nvme_ctrlr_data req_data = {};
struct spdk_nvmf_discovery_log_page req_page = {};
union nvmf_h2c_msg req_cmd = {};
@ -168,7 +168,7 @@ test_process_discovery_cmd(void)
/* IDENTIFY opcode return value check */
req.cmd->nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
req.cmd->nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
req.conn->sess = &req_sess;
req.conn->ctrlr = &req_ctrlr;
req.data = &req_data;
ret = nvmf_discovery_ctrlr_process_admin_cmd(&req);
CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

View File

@ -45,10 +45,10 @@ void spdk_trace_record(uint16_t tpoint_id, uint16_t poller_id, uint32_t size,
}
void
spdk_nvmf_session_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp)
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
struct spdk_nvmf_fabric_connect_cmd *cmd,
struct spdk_nvmf_fabric_connect_data *data,
struct spdk_nvmf_fabric_connect_rsp *rsp)
{
}
@ -97,19 +97,19 @@ struct spdk_nvme_ns *spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint3
}
void
spdk_nvmf_session_disconnect(struct spdk_nvmf_conn *conn)
spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn)
{
}
void
spdk_nvmf_property_get(struct spdk_nvmf_session *session,
spdk_nvmf_property_get(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_get_cmd *cmd,
struct spdk_nvmf_fabric_prop_get_rsp *response)
{
}
void
spdk_nvmf_property_set(struct spdk_nvmf_session *session,
spdk_nvmf_property_set(struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_prop_set_cmd *cmd,
struct spdk_nvme_cpl *rsp)
{
@ -145,9 +145,9 @@ test_nvmf_process_fabrics_cmd(void)
req.conn = &req_conn;
req.cmd = &req_cmd;
req.rsp = &req_rsp;
req.conn->sess = NULL;
req.conn->ctrlr = NULL;
/* No session and invalid command check */
/* No ctrlr and invalid command check */
req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
ret = nvmf_process_fabrics_command(&req);
CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);

View File

@ -1 +0,0 @@
session_ut

View File

@ -137,12 +137,12 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
}
void
spdk_nvmf_session_destruct(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
}
int
spdk_nvmf_session_poll(struct spdk_nvmf_session *session)
spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
{
return -1;
}

View File

@ -69,10 +69,10 @@ $valgrind test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
$valgrind test/unit/lib/log/log.c/log_ut
$valgrind test/unit/lib/nvmf/ctrlr.c/ctrlr_ut
$valgrind test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
$valgrind test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
$valgrind test/unit/lib/nvmf/request.c/request_ut
$valgrind test/unit/lib/nvmf/session.c/session_ut
$valgrind test/unit/lib/nvmf/subsystem.c/subsystem_ut
$valgrind test/unit/lib/scsi/dev.c/dev_ut