nvmf: Rename spdk_nvmf_conn to spdk_nvmf_qpair

Match the terminology used in the NVMe-oF specification,
which is queue pair. For the RDMA transport, this maps to
an RDMA queue pair, but may map to other things for other
transports. It is still logically a "connection" in
the networking sense.

Change-Id: Ic43a5398e63ac85c93a8e0417e4b0d2905bf2dfc
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/371747
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Authored by Ben Walker on 2017-07-13 14:30:28 -07:00; committed by Daniel Verkamp
parent 03788f93df
commit 1d304bc5d8
16 changed files with 330 additions and 329 deletions
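
The rename leaves the structure of the code untouched: each transport still wraps the generic queue pair in its own structure and recovers the container from the generic pointer, as the diff's get_rdma_qpair() shows. Below is a minimal compilable sketch of that embedding pattern; the struct bodies are abbreviated to a couple of representative fields, not the real SPDK layout.

/*
 * Sketch only (not the full SPDK definitions): a transport-specific qpair
 * embeds the generic struct spdk_nvmf_qpair by value, and the transport
 * recovers its container from the generic pointer with offsetof(), exactly
 * as get_rdma_qpair() does in this commit.
 */
#include <stddef.h>
#include <stdint.h>

struct spdk_nvmf_transport;
struct rdma_cm_id;

struct spdk_nvmf_qpair {
	const struct spdk_nvmf_transport *transport;
	uint16_t qid;
	/* ... remaining generic fields elided ... */
};

struct spdk_nvmf_rdma_qpair {
	struct spdk_nvmf_qpair qpair;	/* generic part, embedded by value */
	struct rdma_cm_id *cm_id;	/* transport-specific state follows */
	/* ... */
};

static inline struct spdk_nvmf_rdma_qpair *
get_rdma_qpair(struct spdk_nvmf_qpair *qpair)
{
	/* Step back by the member's offset to reach the containing struct. */
	return (struct spdk_nvmf_rdma_qpair *)((uintptr_t)qpair -
			offsetof(struct spdk_nvmf_rdma_qpair, qpair));
}

Because the generic struct is embedded by value rather than pointed to, the conversion is a constant offset computation and needs no extra allocation or back-pointer.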

View File

@@ -144,19 +144,19 @@ connect_cb(void *cb_ctx, struct spdk_nvmf_request *req)
 static void
 disconnect_event(void *arg1, void *arg2)
 {
-	struct spdk_nvmf_conn *conn = arg1;
-	spdk_nvmf_ctrlr_disconnect(conn);
+	struct spdk_nvmf_qpair *qpair = arg1;
+	spdk_nvmf_ctrlr_disconnect(qpair);
 }
 static void
-disconnect_cb(void *cb_ctx, struct spdk_nvmf_conn *conn)
+disconnect_cb(void *cb_ctx, struct spdk_nvmf_qpair *qpair)
 {
 	struct nvmf_tgt_subsystem *app_subsys = cb_ctx;
 	struct spdk_event *event;
 	/* Pass an event to the core that owns this connection */
-	event = spdk_event_allocate(app_subsys->lcore, disconnect_event, conn, NULL);
+	event = spdk_event_allocate(app_subsys->lcore, disconnect_event, qpair, NULL);
 	spdk_event_call(event);
 }

View File

@@ -48,7 +48,7 @@
 #define MAX_VIRTUAL_NAMESPACE 16
 #define MAX_SN_LEN 20
-int spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_conn_per_sess,
+int spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_qpair_per_ctrlr,
 		       uint32_t in_capsule_data_size, uint32_t max_io_size);
 int spdk_nvmf_tgt_fini(void);
@@ -57,15 +57,14 @@ int spdk_nvmf_check_pools(void);
 struct spdk_nvmf_subsystem;
 struct spdk_nvmf_ctrlr;
-struct spdk_nvmf_conn;
+struct spdk_nvmf_qpair;
 struct spdk_nvmf_request;
 struct spdk_bdev;
 struct spdk_nvmf_request;
-struct spdk_nvmf_conn;
 struct spdk_nvmf_ctrlr_ops;
 typedef void (*spdk_nvmf_subsystem_connect_fn)(void *cb_ctx, struct spdk_nvmf_request *req);
-typedef void (*spdk_nvmf_subsystem_disconnect_fn)(void *cb_ctx, struct spdk_nvmf_conn *conn);
+typedef void (*spdk_nvmf_subsystem_disconnect_fn)(void *cb_ctx, struct spdk_nvmf_qpair *qpair);
 struct spdk_nvmf_listen_addr {
 	struct spdk_nvme_transport_id trid;
@@ -172,6 +171,6 @@ void spdk_nvmf_acceptor_poll(void);
 void spdk_nvmf_handle_connect(struct spdk_nvmf_request *req);
-void spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn);
+void spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair);
 #endif

View File

@@ -169,12 +169,12 @@ static void ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
 void
 spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
 {
-	while (!TAILQ_EMPTY(&ctrlr->connections)) {
-		struct spdk_nvmf_conn *conn = TAILQ_FIRST(&ctrlr->connections);
-		TAILQ_REMOVE(&ctrlr->connections, conn, link);
-		ctrlr->num_connections--;
-		conn->transport->conn_fini(conn);
+	while (!TAILQ_EMPTY(&ctrlr->qpairs)) {
+		struct spdk_nvmf_qpair *qpair = TAILQ_FIRST(&ctrlr->qpairs);
+		TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
+		ctrlr->num_qpairs--;
+		qpair->transport->qpair_fini(qpair);
 	}
 	ctrlr_destruct(ctrlr);
@@ -228,7 +228,7 @@ spdk_nvmf_ctrlr_gen_cntlid(void)
 }
 void
-spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
+spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
			struct spdk_nvmf_fabric_connect_cmd *cmd,
			struct spdk_nvmf_fabric_connect_data *data,
			struct spdk_nvmf_fabric_connect_rsp *rsp)
@@ -272,11 +272,11 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
		INVALID_CONNECT_CMD(sqsize);
		return;
	}
-	conn->sq_head_max = cmd->sqsize;
-	conn->qid = cmd->qid;
+	qpair->sq_head_max = cmd->sqsize;
+	qpair->qid = cmd->qid;
	if (cmd->qid == 0) {
-		conn->type = CONN_TYPE_AQ;
+		qpair->type = QPAIR_TYPE_AQ;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);
@@ -288,14 +288,14 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
		}
		/* Establish a new ctrlr */
-		ctrlr = conn->transport->ctrlr_init();
+		ctrlr = qpair->transport->ctrlr_init();
		if (ctrlr == NULL) {
			SPDK_ERRLOG("Memory allocation failure\n");
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return;
		}
-		TAILQ_INIT(&ctrlr->connections);
+		TAILQ_INIT(&ctrlr->qpairs);
		ctrlr->cntlid = spdk_nvmf_ctrlr_gen_cntlid();
		if (ctrlr->cntlid == 0) {
@@ -304,17 +304,18 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return;
		}
		ctrlr->kato = cmd->kato;
		ctrlr->async_event_config.raw = 0;
-		ctrlr->num_connections = 0;
+		ctrlr->num_qpairs = 0;
		ctrlr->subsys = subsystem;
-		ctrlr->max_connections_allowed = g_nvmf_tgt.max_queues_per_ctrlr;
+		ctrlr->max_qpairs_allowed = g_nvmf_tgt.max_qpairs_per_ctrlr;
		memcpy(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid));
-		if (conn->transport->ctrlr_add_conn(ctrlr, conn)) {
+		if (qpair->transport->ctrlr_add_qpair(ctrlr, qpair)) {
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
-			conn->transport->ctrlr_fini(ctrlr);
+			qpair->transport->ctrlr_fini(ctrlr);
			free(ctrlr);
			return;
		}
@@ -329,7 +330,7 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
	} else {
		struct spdk_nvmf_ctrlr *tmp;
-		conn->type = CONN_TYPE_IOQ;
+		qpair->type = QPAIR_TYPE_IOQ;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);
		ctrlr = NULL;
@@ -366,22 +367,22 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
		}
		/* check if we would exceed ctrlr connection limit */
-		if (ctrlr->num_connections >= ctrlr->max_connections_allowed) {
-			SPDK_ERRLOG("connection limit %d\n", ctrlr->num_connections);
+		if (ctrlr->num_qpairs >= ctrlr->max_qpairs_allowed) {
+			SPDK_ERRLOG("qpair limit %d\n", ctrlr->num_qpairs);
			rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
			rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
			return;
		}
-		if (conn->transport->ctrlr_add_conn(ctrlr, conn)) {
+		if (qpair->transport->ctrlr_add_qpair(ctrlr, qpair)) {
			INVALID_CONNECT_CMD(qid);
			return;
		}
	}
-	ctrlr->num_connections++;
-	TAILQ_INSERT_HEAD(&ctrlr->connections, conn, link);
-	conn->ctrlr = ctrlr;
+	ctrlr->num_qpairs++;
+	TAILQ_INSERT_HEAD(&ctrlr->qpairs, qpair, link);
+	qpair->ctrlr = ctrlr;
	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	rsp->status_code_specific.success.cntlid = ctrlr->vcdata.cntlid;
@@ -390,39 +391,39 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
 }
 void
-spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn)
+spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	assert(ctrlr != NULL);
-	ctrlr->num_connections--;
-	TAILQ_REMOVE(&ctrlr->connections, conn, link);
-	conn->transport->ctrlr_remove_conn(ctrlr, conn);
-	conn->transport->conn_fini(conn);
-	if (ctrlr->num_connections == 0) {
+	ctrlr->num_qpairs--;
+	TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
+	qpair->transport->ctrlr_remove_qpair(ctrlr, qpair);
+	qpair->transport->qpair_fini(qpair);
+	if (ctrlr->num_qpairs == 0) {
		ctrlr_destruct(ctrlr);
	}
 }
-struct spdk_nvmf_conn *
-spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
+struct spdk_nvmf_qpair *
+spdk_nvmf_ctrlr_get_qpair(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
 {
-	struct spdk_nvmf_conn *conn;
-	TAILQ_FOREACH(conn, &ctrlr->connections, link) {
-		if (conn->qid == qid) {
-			return conn;
+	struct spdk_nvmf_qpair *qpair;
+	TAILQ_FOREACH(qpair, &ctrlr->qpairs, link) {
+		if (qpair->qid == qid) {
+			return qpair;
		}
	}
	return NULL;
 }
 struct spdk_nvmf_request *
-spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid)
+spdk_nvmf_qpair_get_request(struct spdk_nvmf_qpair *qpair, uint16_t cid)
 {
-	/* TODO: track list of outstanding requests in conn? */
+	/* TODO: track list of outstanding requests in qpair? */
	return NULL;
 }
@@ -637,7 +638,7 @@ spdk_nvmf_property_set(struct spdk_nvmf_ctrlr *ctrlr,
 int
 spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
 {
-	struct spdk_nvmf_conn *conn, *tmp;
+	struct spdk_nvmf_qpair *qpair, *tmp;
	struct spdk_nvmf_subsystem *subsys = ctrlr->subsys;
	if (subsys->is_removed) {
@@ -652,10 +653,10 @@ spdk_nvmf_ctrlr_poll(struct spdk_nvmf_ctrlr *ctrlr)
		}
	}
-	TAILQ_FOREACH_SAFE(conn, &ctrlr->connections, link, tmp) {
-		if (conn->transport->conn_poll(conn) < 0) {
-			SPDK_ERRLOG("Transport poll failed for conn %p; closing connection\n", conn);
-			spdk_nvmf_ctrlr_disconnect(conn);
+	TAILQ_FOREACH_SAFE(qpair, &ctrlr->qpairs, link, tmp) {
+		if (qpair->transport->qpair_poll(qpair) < 0) {
+			SPDK_ERRLOG("Transport poll failed for qpair %p; closing connection\n", qpair);
+			spdk_nvmf_ctrlr_disconnect(qpair);
		}
	}
@@ -675,7 +676,7 @@ spdk_nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
@@ -700,7 +701,7 @@ spdk_nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
@@ -722,7 +723,7 @@ spdk_nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Keep Alive Timer\n");
@@ -733,7 +734,7 @@ spdk_nvmf_ctrlr_get_features_keep_alive_timer(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint32_t nr_io_queues;
@@ -741,10 +742,10 @@ spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
		      req->cmd->nvme_cmd.cdw11);
	/* Extra 1 connection for Admin queue */
-	nr_io_queues = ctrlr->max_connections_allowed - 1;
+	nr_io_queues = ctrlr->max_qpairs_allowed - 1;
	/* verify that the contoller is ready to process commands */
-	if (ctrlr->num_connections > 1) {
+	if (ctrlr->num_qpairs > 1) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Queue pairs already active!\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	} else {
@@ -759,13 +760,13 @@ spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_get_features_number_of_queues(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint32_t nr_io_queues;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Number of Queues\n");
-	nr_io_queues = ctrlr->max_connections_allowed - 1;
+	nr_io_queues = ctrlr->max_qpairs_allowed - 1;
	/* Number of IO queues has a zero based value */
	rsp->cdw0 = ((nr_io_queues - 1) << 16) |
@@ -777,7 +778,7 @@ spdk_nvmf_ctrlr_get_features_number_of_queues(struct spdk_nvmf_request *req)
 int
 spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
@@ -789,7 +790,7 @@ spdk_nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request
 int
 spdk_nvmf_ctrlr_get_features_async_event_configuration(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Async Event Configuration\n");
@@ -800,7 +801,7 @@ spdk_nvmf_ctrlr_get_features_async_event_configuration(struct spdk_nvmf_request
 int
 spdk_nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Async Event Request\n");

View File

@@ -45,21 +45,21 @@
 struct spdk_nvmf_transport;
 struct spdk_nvmf_request;
-enum conn_type {
-	CONN_TYPE_AQ = 0,
-	CONN_TYPE_IOQ = 1,
+enum spdk_nvmf_qpair_type {
+	QPAIR_TYPE_AQ = 0,
+	QPAIR_TYPE_IOQ = 1,
 };
-struct spdk_nvmf_conn {
+struct spdk_nvmf_qpair {
	const struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_ctrlr *ctrlr;
-	enum conn_type type;
+	enum spdk_nvmf_qpair_type type;
	uint16_t qid;
	uint16_t sq_head;
	uint16_t sq_head_max;
-	TAILQ_ENTRY(spdk_nvmf_conn) link;
+	TAILQ_ENTRY(spdk_nvmf_qpair) link;
 };
 /*
@@ -78,9 +78,9 @@ struct spdk_nvmf_ctrlr {
	} vcprop; /* virtual controller properties */
	struct spdk_nvme_ctrlr_data vcdata; /* virtual controller data */
-	TAILQ_HEAD(connection_q, spdk_nvmf_conn) connections;
-	int num_connections;
-	int max_connections_allowed;
+	TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
+	int num_qpairs;
+	int max_qpairs_allowed;
	uint32_t kato;
	union {
		uint32_t raw;
@@ -97,14 +97,14 @@ struct spdk_nvmf_ctrlr {
	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
 };
-void spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
+void spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
			     struct spdk_nvmf_fabric_connect_cmd *cmd,
			     struct spdk_nvmf_fabric_connect_data *data,
			     struct spdk_nvmf_fabric_connect_rsp *rsp);
-struct spdk_nvmf_conn *spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid);
-struct spdk_nvmf_request *spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid);
+struct spdk_nvmf_qpair *spdk_nvmf_ctrlr_get_qpair(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid);
+struct spdk_nvmf_request *spdk_nvmf_qpair_get_request(struct spdk_nvmf_qpair *qpair, uint16_t cid);
 void
 spdk_nvmf_property_get(struct spdk_nvmf_ctrlr *ctrlr,

View File

@@ -253,7 +253,7 @@ static int
 nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
 {
	uint8_t cns;
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
@@ -284,21 +284,21 @@ nvmf_bdev_ctrlr_identify(struct spdk_nvmf_request *req)
 static int
 nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	uint32_t cdw10 = cmd->cdw10;
	uint16_t cid = cdw10 >> 16;
	uint16_t sqid = cdw10 & 0xFFFFu;
-	struct spdk_nvmf_conn *conn;
+	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_request *req_to_abort;
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "abort sqid=%u cid=%u\n", sqid, cid);
	rsp->cdw0 = 1; /* Command not aborted */
-	conn = spdk_nvmf_ctrlr_get_conn(ctrlr, sqid);
-	if (conn == NULL) {
+	qpair = spdk_nvmf_ctrlr_get_qpair(ctrlr, sqid);
+	if (qpair == NULL) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "sqid %u not found\n", sqid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
@@ -308,9 +308,9 @@ nvmf_bdev_ctrlr_abort(struct spdk_nvmf_request *req)
	/*
	 * NOTE: This relies on the assumption that all connections for a ctrlr will be handled
	 * on the same thread. If this assumption becomes untrue, this will need to pass a message
-	 * to the thread handling conn, and the abort will need to be asynchronous.
+	 * to the thread handling qpair, and the abort will need to be asynchronous.
	 */
-	req_to_abort = spdk_nvmf_conn_get_request(conn, cid);
+	req_to_abort = spdk_nvmf_qpair_get_request(qpair, cid);
	if (req_to_abort == NULL) {
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "cid %u not found\n", cid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
@@ -609,7 +609,7 @@ nvmf_bdev_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
-	struct spdk_nvmf_subsystem *subsystem = req->conn->ctrlr->subsys;
+	struct spdk_nvmf_subsystem *subsystem = req->qpair->ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

View File

@@ -155,7 +155,7 @@ nvmf_get_log_page_len(struct spdk_nvme_cmd *cmd)
 static int
 nvmf_discovery_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t log_page_offset;

View File

@@ -49,12 +49,12 @@ SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
 struct spdk_nvmf_tgt g_nvmf_tgt;
 int
-spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_ctrlr,
+spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_qpairs_per_ctrlr,
		   uint32_t in_capsule_data_size, uint32_t max_io_size)
 {
	int rc;
-	g_nvmf_tgt.max_queues_per_ctrlr = max_queues_per_ctrlr;
+	g_nvmf_tgt.max_qpairs_per_ctrlr = max_qpairs_per_ctrlr;
	g_nvmf_tgt.max_queue_depth = max_queue_depth;
	g_nvmf_tgt.in_capsule_data_size = in_capsule_data_size;
	g_nvmf_tgt.max_io_size = max_io_size;
@@ -65,7 +65,7 @@ spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_queues_per_ctrlr,
	TAILQ_INIT(&g_nvmf_tgt.subsystems);
	TAILQ_INIT(&g_nvmf_tgt.listen_addrs);
-	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queues Per Controller: %d\n", max_queues_per_ctrlr);
+	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Pairs Per Controller: %d\n", max_qpairs_per_ctrlr);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", max_queue_depth);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max In Capsule Data: %d bytes\n", in_capsule_data_size);
	SPDK_TRACELOG(SPDK_TRACE_NVMF, "Max I/O Size: %d bytes\n", max_io_size);

View File

@@ -78,7 +78,7 @@ struct spdk_nvmf_ctrlr_ops {
 struct spdk_nvmf_tgt {
	uint16_t max_queue_depth;
-	uint16_t max_queues_per_ctrlr;
+	uint16_t max_qpairs_per_ctrlr;
	uint32_t in_capsule_data_size;
	uint32_t max_io_size;
	uint64_t discovery_genctr;

View File

@ -103,8 +103,8 @@ struct spdk_nvmf_rdma_request {
TAILQ_ENTRY(spdk_nvmf_rdma_request) link; TAILQ_ENTRY(spdk_nvmf_rdma_request) link;
}; };
struct spdk_nvmf_rdma_conn { struct spdk_nvmf_rdma_qpair {
struct spdk_nvmf_conn conn; struct spdk_nvmf_qpair qpair;
struct rdma_cm_id *cm_id; struct rdma_cm_id *cm_id;
struct ibv_cq *cq; struct ibv_cq *cq;
@ -160,11 +160,11 @@ struct spdk_nvmf_rdma_conn {
void *bufs; void *bufs;
struct ibv_mr *bufs_mr; struct ibv_mr *bufs_mr;
TAILQ_ENTRY(spdk_nvmf_rdma_conn) link; TAILQ_ENTRY(spdk_nvmf_rdma_qpair) link;
}; };
/* List of RDMA connections that have not yet received a CONNECT capsule */ /* List of RDMA connections that have not yet received a CONNECT capsule */
static TAILQ_HEAD(, spdk_nvmf_rdma_conn) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns); static TAILQ_HEAD(, spdk_nvmf_rdma_qpair) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns);
struct spdk_nvmf_rdma_ctrlr { struct spdk_nvmf_rdma_ctrlr {
struct spdk_nvmf_ctrlr ctrlr; struct spdk_nvmf_ctrlr ctrlr;
@ -204,10 +204,11 @@ static struct spdk_nvmf_rdma g_rdma = {
.listen_addrs = TAILQ_HEAD_INITIALIZER(g_rdma.listen_addrs), .listen_addrs = TAILQ_HEAD_INITIALIZER(g_rdma.listen_addrs),
}; };
static inline struct spdk_nvmf_rdma_conn * static inline struct spdk_nvmf_rdma_qpair *
get_rdma_conn(struct spdk_nvmf_conn *conn) get_rdma_qpair(struct spdk_nvmf_qpair *qpair)
{ {
return (struct spdk_nvmf_rdma_conn *)((uintptr_t)conn - offsetof(struct spdk_nvmf_rdma_conn, conn)); return (struct spdk_nvmf_rdma_qpair *)((uintptr_t)qpair - offsetof(struct spdk_nvmf_rdma_qpair,
qpair));
} }
static inline struct spdk_nvmf_rdma_request * static inline struct spdk_nvmf_rdma_request *
@ -225,75 +226,75 @@ get_rdma_ctrlr(struct spdk_nvmf_ctrlr *ctrlr)
} }
static void static void
spdk_nvmf_rdma_conn_destroy(struct spdk_nvmf_rdma_conn *rdma_conn) spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rdma_qpair)
{ {
if (rdma_conn->cmds_mr) { if (rdma_qpair->cmds_mr) {
ibv_dereg_mr(rdma_conn->cmds_mr); ibv_dereg_mr(rdma_qpair->cmds_mr);
} }
if (rdma_conn->cpls_mr) { if (rdma_qpair->cpls_mr) {
ibv_dereg_mr(rdma_conn->cpls_mr); ibv_dereg_mr(rdma_qpair->cpls_mr);
} }
if (rdma_conn->bufs_mr) { if (rdma_qpair->bufs_mr) {
ibv_dereg_mr(rdma_conn->bufs_mr); ibv_dereg_mr(rdma_qpair->bufs_mr);
} }
if (rdma_conn->cm_id) { if (rdma_qpair->cm_id) {
rdma_destroy_qp(rdma_conn->cm_id); rdma_destroy_qp(rdma_qpair->cm_id);
rdma_destroy_id(rdma_conn->cm_id); rdma_destroy_id(rdma_qpair->cm_id);
} }
if (rdma_conn->cq) { if (rdma_qpair->cq) {
ibv_destroy_cq(rdma_conn->cq); ibv_destroy_cq(rdma_qpair->cq);
} }
/* Free all memory */ /* Free all memory */
spdk_dma_free(rdma_conn->cmds); spdk_dma_free(rdma_qpair->cmds);
spdk_dma_free(rdma_conn->cpls); spdk_dma_free(rdma_qpair->cpls);
spdk_dma_free(rdma_conn->bufs); spdk_dma_free(rdma_qpair->bufs);
free(rdma_conn->reqs); free(rdma_qpair->reqs);
free(rdma_conn); free(rdma_qpair);
} }
static struct spdk_nvmf_rdma_conn * static struct spdk_nvmf_rdma_qpair *
spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *channel, spdk_nvmf_rdma_qpair_create(struct rdma_cm_id *id, struct ibv_comp_channel *channel,
uint16_t max_queue_depth, uint16_t max_rw_depth, uint32_t subsystem_id) uint16_t max_queue_depth, uint16_t max_rw_depth, uint32_t subsystem_id)
{ {
struct spdk_nvmf_rdma_conn *rdma_conn; struct spdk_nvmf_rdma_qpair *rdma_qpair;
struct spdk_nvmf_conn *conn; struct spdk_nvmf_qpair *qpair;
int rc, i; int rc, i;
struct ibv_qp_init_attr attr; struct ibv_qp_init_attr attr;
struct spdk_nvmf_rdma_recv *rdma_recv; struct spdk_nvmf_rdma_recv *rdma_recv;
struct spdk_nvmf_rdma_request *rdma_req; struct spdk_nvmf_rdma_request *rdma_req;
rdma_conn = calloc(1, sizeof(struct spdk_nvmf_rdma_conn)); rdma_qpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
if (rdma_conn == NULL) { if (rdma_qpair == NULL) {
SPDK_ERRLOG("Could not allocate new connection.\n"); SPDK_ERRLOG("Could not allocate new connection.\n");
return NULL; return NULL;
} }
rdma_conn->max_queue_depth = max_queue_depth; rdma_qpair->max_queue_depth = max_queue_depth;
rdma_conn->max_rw_depth = max_rw_depth; rdma_qpair->max_rw_depth = max_rw_depth;
TAILQ_INIT(&rdma_conn->incoming_queue); TAILQ_INIT(&rdma_qpair->incoming_queue);
TAILQ_INIT(&rdma_conn->free_queue); TAILQ_INIT(&rdma_qpair->free_queue);
TAILQ_INIT(&rdma_conn->pending_data_buf_queue); TAILQ_INIT(&rdma_qpair->pending_data_buf_queue);
TAILQ_INIT(&rdma_conn->pending_rdma_rw_queue); TAILQ_INIT(&rdma_qpair->pending_rdma_rw_queue);
rdma_conn->cq = ibv_create_cq(id->verbs, max_queue_depth * 3, rdma_conn, channel, 0); rdma_qpair->cq = ibv_create_cq(id->verbs, max_queue_depth * 3, rdma_qpair, channel, 0);
if (!rdma_conn->cq) { if (!rdma_qpair->cq) {
SPDK_ERRLOG("Unable to create completion queue\n"); SPDK_ERRLOG("Unable to create completion queue\n");
SPDK_ERRLOG("Completion Channel: %p Id: %p Verbs: %p\n", channel, id, id->verbs); SPDK_ERRLOG("Completion Channel: %p Id: %p Verbs: %p\n", channel, id, id->verbs);
SPDK_ERRLOG("Errno %d: %s\n", errno, strerror(errno)); SPDK_ERRLOG("Errno %d: %s\n", errno, strerror(errno));
rdma_destroy_id(id); rdma_destroy_id(id);
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return NULL; return NULL;
} }
memset(&attr, 0, sizeof(struct ibv_qp_init_attr)); memset(&attr, 0, sizeof(struct ibv_qp_init_attr));
attr.qp_type = IBV_QPT_RC; attr.qp_type = IBV_QPT_RC;
attr.send_cq = rdma_conn->cq; attr.send_cq = rdma_qpair->cq;
attr.recv_cq = rdma_conn->cq; attr.recv_cq = rdma_qpair->cq;
attr.cap.max_send_wr = max_queue_depth * 2; /* SEND, READ, and WRITE operations */ attr.cap.max_send_wr = max_queue_depth * 2; /* SEND, READ, and WRITE operations */
attr.cap.max_recv_wr = max_queue_depth; /* RECV operations */ attr.cap.max_recv_wr = max_queue_depth; /* RECV operations */
attr.cap.max_send_sge = NVMF_DEFAULT_TX_SGE; attr.cap.max_send_sge = NVMF_DEFAULT_TX_SGE;
@ -304,69 +305,69 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *chann
SPDK_ERRLOG("rdma_create_qp failed\n"); SPDK_ERRLOG("rdma_create_qp failed\n");
SPDK_ERRLOG("Errno %d: %s\n", errno, strerror(errno)); SPDK_ERRLOG("Errno %d: %s\n", errno, strerror(errno));
rdma_destroy_id(id); rdma_destroy_id(id);
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return NULL; return NULL;
} }
conn = &rdma_conn->conn; qpair = &rdma_qpair->qpair;
conn->transport = &spdk_nvmf_transport_rdma; qpair->transport = &spdk_nvmf_transport_rdma;
id->context = conn; id->context = qpair;
rdma_conn->cm_id = id; rdma_qpair->cm_id = id;
SPDK_TRACELOG(SPDK_TRACE_RDMA, "New RDMA Connection: %p\n", conn); SPDK_TRACELOG(SPDK_TRACE_RDMA, "New RDMA Connection: %p\n", qpair);
rdma_conn->reqs = calloc(max_queue_depth, sizeof(*rdma_conn->reqs)); rdma_qpair->reqs = calloc(max_queue_depth, sizeof(*rdma_qpair->reqs));
rdma_conn->recvs = calloc(max_queue_depth, sizeof(*rdma_conn->recvs)); rdma_qpair->recvs = calloc(max_queue_depth, sizeof(*rdma_qpair->recvs));
rdma_conn->cmds = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_conn->cmds), rdma_qpair->cmds = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_qpair->cmds),
0x1000, NULL); 0x1000, NULL);
rdma_conn->cpls = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_conn->cpls), rdma_qpair->cpls = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_qpair->cpls),
0x1000, NULL); 0x1000, NULL);
rdma_conn->bufs = spdk_dma_zmalloc(max_queue_depth * g_rdma.in_capsule_data_size, rdma_qpair->bufs = spdk_dma_zmalloc(max_queue_depth * g_rdma.in_capsule_data_size,
0x1000, NULL); 0x1000, NULL);
if (!rdma_conn->reqs || !rdma_conn->recvs || !rdma_conn->cmds || if (!rdma_qpair->reqs || !rdma_qpair->recvs || !rdma_qpair->cmds ||
!rdma_conn->cpls || !rdma_conn->bufs) { !rdma_qpair->cpls || !rdma_qpair->bufs) {
SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n"); SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return NULL; return NULL;
} }
rdma_conn->cmds_mr = ibv_reg_mr(id->pd, rdma_conn->cmds, rdma_qpair->cmds_mr = ibv_reg_mr(id->pd, rdma_qpair->cmds,
max_queue_depth * sizeof(*rdma_conn->cmds), max_queue_depth * sizeof(*rdma_qpair->cmds),
IBV_ACCESS_LOCAL_WRITE); IBV_ACCESS_LOCAL_WRITE);
rdma_conn->cpls_mr = ibv_reg_mr(id->pd, rdma_conn->cpls, rdma_qpair->cpls_mr = ibv_reg_mr(id->pd, rdma_qpair->cpls,
max_queue_depth * sizeof(*rdma_conn->cpls), max_queue_depth * sizeof(*rdma_qpair->cpls),
0); 0);
rdma_conn->bufs_mr = ibv_reg_mr(id->pd, rdma_conn->bufs, rdma_qpair->bufs_mr = ibv_reg_mr(id->pd, rdma_qpair->bufs,
max_queue_depth * g_rdma.in_capsule_data_size, max_queue_depth * g_rdma.in_capsule_data_size,
IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE); IBV_ACCESS_REMOTE_WRITE);
if (!rdma_conn->cmds_mr || !rdma_conn->cpls_mr || !rdma_conn->bufs_mr) { if (!rdma_qpair->cmds_mr || !rdma_qpair->cpls_mr || !rdma_qpair->bufs_mr) {
SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n"); SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return NULL; return NULL;
} }
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Command Array: %p Length: %lx LKey: %x\n", SPDK_TRACELOG(SPDK_TRACE_RDMA, "Command Array: %p Length: %lx LKey: %x\n",
rdma_conn->cmds, max_queue_depth * sizeof(*rdma_conn->cmds), rdma_conn->cmds_mr->lkey); rdma_qpair->cmds, max_queue_depth * sizeof(*rdma_qpair->cmds), rdma_qpair->cmds_mr->lkey);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Completion Array: %p Length: %lx LKey: %x\n", SPDK_TRACELOG(SPDK_TRACE_RDMA, "Completion Array: %p Length: %lx LKey: %x\n",
rdma_conn->cpls, max_queue_depth * sizeof(*rdma_conn->cpls), rdma_conn->cpls_mr->lkey); rdma_qpair->cpls, max_queue_depth * sizeof(*rdma_qpair->cpls), rdma_qpair->cpls_mr->lkey);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n", SPDK_TRACELOG(SPDK_TRACE_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
rdma_conn->bufs, max_queue_depth * g_rdma.in_capsule_data_size, rdma_conn->bufs_mr->lkey); rdma_qpair->bufs, max_queue_depth * g_rdma.in_capsule_data_size, rdma_qpair->bufs_mr->lkey);
for (i = 0; i < max_queue_depth; i++) { for (i = 0; i < max_queue_depth; i++) {
struct ibv_recv_wr *bad_wr = NULL; struct ibv_recv_wr *bad_wr = NULL;
rdma_recv = &rdma_conn->recvs[i]; rdma_recv = &rdma_qpair->recvs[i];
/* Set up memory to receive commands */ /* Set up memory to receive commands */
rdma_recv->buf = (void *)((uintptr_t)rdma_conn->bufs + (i * g_rdma.in_capsule_data_size)); rdma_recv->buf = (void *)((uintptr_t)rdma_qpair->bufs + (i * g_rdma.in_capsule_data_size));
rdma_recv->sgl[0].addr = (uintptr_t)&rdma_conn->cmds[i]; rdma_recv->sgl[0].addr = (uintptr_t)&rdma_qpair->cmds[i];
rdma_recv->sgl[0].length = sizeof(rdma_conn->cmds[i]); rdma_recv->sgl[0].length = sizeof(rdma_qpair->cmds[i]);
rdma_recv->sgl[0].lkey = rdma_conn->cmds_mr->lkey; rdma_recv->sgl[0].lkey = rdma_qpair->cmds_mr->lkey;
rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf; rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
rdma_recv->sgl[1].length = g_rdma.in_capsule_data_size; rdma_recv->sgl[1].length = g_rdma.in_capsule_data_size;
rdma_recv->sgl[1].lkey = rdma_conn->bufs_mr->lkey; rdma_recv->sgl[1].lkey = rdma_qpair->bufs_mr->lkey;
rdma_recv->wr.wr_id = (uintptr_t)rdma_recv; rdma_recv->wr.wr_id = (uintptr_t)rdma_recv;
rdma_recv->wr.sg_list = rdma_recv->sgl; rdma_recv->wr.sg_list = rdma_recv->sgl;
@ -375,26 +376,26 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *chann
rdma_recv->in_use = false; rdma_recv->in_use = false;
#endif #endif
rc = ibv_post_recv(rdma_conn->cm_id->qp, &rdma_recv->wr, &bad_wr); rc = ibv_post_recv(rdma_qpair->cm_id->qp, &rdma_recv->wr, &bad_wr);
if (rc) { if (rc) {
SPDK_ERRLOG("Unable to post capsule for RDMA RECV\n"); SPDK_ERRLOG("Unable to post capsule for RDMA RECV\n");
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return NULL; return NULL;
} }
} }
for (i = 0; i < max_queue_depth; i++) { for (i = 0; i < max_queue_depth; i++) {
rdma_req = &rdma_conn->reqs[i]; rdma_req = &rdma_qpair->reqs[i];
rdma_req->req.conn = &rdma_conn->conn; rdma_req->req.qpair = &rdma_qpair->qpair;
rdma_req->req.cmd = NULL; rdma_req->req.cmd = NULL;
/* Set up memory to send responses */ /* Set up memory to send responses */
rdma_req->req.rsp = &rdma_conn->cpls[i]; rdma_req->req.rsp = &rdma_qpair->cpls[i];
rdma_req->rsp.sgl[0].addr = (uintptr_t)&rdma_conn->cpls[i]; rdma_req->rsp.sgl[0].addr = (uintptr_t)&rdma_qpair->cpls[i];
rdma_req->rsp.sgl[0].length = sizeof(rdma_conn->cpls[i]); rdma_req->rsp.sgl[0].length = sizeof(rdma_qpair->cpls[i]);
rdma_req->rsp.sgl[0].lkey = rdma_conn->cpls_mr->lkey; rdma_req->rsp.sgl[0].lkey = rdma_qpair->cpls_mr->lkey;
rdma_req->rsp.wr.wr_id = (uintptr_t)rdma_req; rdma_req->rsp.wr.wr_id = (uintptr_t)rdma_req;
rdma_req->rsp.wr.next = NULL; rdma_req->rsp.wr.next = NULL;
@ -410,10 +411,10 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *chann
rdma_req->data.wr.sg_list = rdma_req->data.sgl; rdma_req->data.wr.sg_list = rdma_req->data.sgl;
rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl); rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
TAILQ_INSERT_TAIL(&rdma_conn->free_queue, rdma_req, link); TAILQ_INSERT_TAIL(&rdma_qpair->free_queue, rdma_req, link);
} }
return rdma_conn; return rdma_qpair;
} }
static int static int
@ -421,20 +422,20 @@ request_transfer_in(struct spdk_nvmf_request *req)
{ {
int rc; int rc;
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req); struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
struct spdk_nvmf_conn *conn = req->conn; struct spdk_nvmf_qpair *qpair = req->qpair;
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn); struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
struct ibv_send_wr *bad_wr = NULL; struct ibv_send_wr *bad_wr = NULL;
assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER); assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
rdma_conn->cur_rdma_rw_depth++; rdma_qpair->cur_rdma_rw_depth++;
SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA READ POSTED. Request: %p Connection: %p\n", req, conn); SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA READ POSTED. Request: %p Connection: %p\n", req, qpair);
spdk_trace_record(TRACE_RDMA_READ_START, 0, 0, (uintptr_t)req, 0); spdk_trace_record(TRACE_RDMA_READ_START, 0, 0, (uintptr_t)req, 0);
rdma_req->data.wr.opcode = IBV_WR_RDMA_READ; rdma_req->data.wr.opcode = IBV_WR_RDMA_READ;
rdma_req->data.wr.next = NULL; rdma_req->data.wr.next = NULL;
rc = ibv_post_send(rdma_conn->cm_id->qp, &rdma_req->data.wr, &bad_wr); rc = ibv_post_send(rdma_qpair->cm_id->qp, &rdma_req->data.wr, &bad_wr);
if (rc) { if (rc) {
SPDK_ERRLOG("Unable to transfer data from host to target\n"); SPDK_ERRLOG("Unable to transfer data from host to target\n");
return -1; return -1;
@ -448,19 +449,19 @@ request_transfer_out(struct spdk_nvmf_request *req)
{ {
int rc; int rc;
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req); struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
struct spdk_nvmf_conn *conn = req->conn; struct spdk_nvmf_qpair *qpair = req->qpair;
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn); struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct ibv_recv_wr *bad_recv_wr = NULL; struct ibv_recv_wr *bad_recv_wr = NULL;
struct ibv_send_wr *send_wr, *bad_send_wr = NULL; struct ibv_send_wr *send_wr, *bad_send_wr = NULL;
/* Advance our sq_head pointer */ /* Advance our sq_head pointer */
if (conn->sq_head == conn->sq_head_max) { if (qpair->sq_head == qpair->sq_head_max) {
conn->sq_head = 0; qpair->sq_head = 0;
} else { } else {
conn->sq_head++; qpair->sq_head++;
} }
rsp->sqhd = conn->sq_head; rsp->sqhd = qpair->sq_head;
/* Post the capsule to the recv buffer */ /* Post the capsule to the recv buffer */
assert(rdma_req->recv != NULL); assert(rdma_req->recv != NULL);
@ -469,8 +470,8 @@ request_transfer_out(struct spdk_nvmf_request *req)
rdma_req->recv->in_use = false; rdma_req->recv->in_use = false;
#endif #endif
SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA RECV POSTED. Recv: %p Connection: %p\n", rdma_req->recv, SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA RECV POSTED. Recv: %p Connection: %p\n", rdma_req->recv,
rdma_conn); rdma_qpair);
rc = ibv_post_recv(rdma_conn->cm_id->qp, &rdma_req->recv->wr, &bad_recv_wr); rc = ibv_post_recv(rdma_qpair->cm_id->qp, &rdma_req->recv->wr, &bad_recv_wr);
if (rc) { if (rc) {
SPDK_ERRLOG("Unable to re-post rx descriptor\n"); SPDK_ERRLOG("Unable to re-post rx descriptor\n");
return rc; return rc;
@ -485,21 +486,21 @@ request_transfer_out(struct spdk_nvmf_request *req)
if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA WRITE POSTED. Request: %p Connection: %p\n", req, conn); SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA WRITE POSTED. Request: %p Connection: %p\n", req, qpair);
spdk_trace_record(TRACE_RDMA_WRITE_START, 0, 0, (uintptr_t)req, 0); spdk_trace_record(TRACE_RDMA_WRITE_START, 0, 0, (uintptr_t)req, 0);
rdma_conn->cur_rdma_rw_depth++; rdma_qpair->cur_rdma_rw_depth++;
rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE; rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE;
rdma_req->data.wr.next = send_wr; rdma_req->data.wr.next = send_wr;
send_wr = &rdma_req->data.wr; send_wr = &rdma_req->data.wr;
} }
SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA SEND POSTED. Request: %p Connection: %p\n", req, conn); SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA SEND POSTED. Request: %p Connection: %p\n", req, qpair);
spdk_trace_record(TRACE_NVMF_IO_COMPLETE, 0, 0, (uintptr_t)req, 0); spdk_trace_record(TRACE_NVMF_IO_COMPLETE, 0, 0, (uintptr_t)req, 0);
/* Send the completion */ /* Send the completion */
rc = ibv_post_send(rdma_conn->cm_id->qp, send_wr, &bad_send_wr); rc = ibv_post_send(rdma_qpair->cm_id->qp, send_wr, &bad_send_wr);
if (rc) { if (rc) {
SPDK_ERRLOG("Unable to send response capsule\n"); SPDK_ERRLOG("Unable to send response capsule\n");
} }
@ -511,22 +512,22 @@ static int
spdk_nvmf_rdma_request_transfer_data(struct spdk_nvmf_request *req) spdk_nvmf_rdma_request_transfer_data(struct spdk_nvmf_request *req)
{ {
struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req); struct spdk_nvmf_rdma_request *rdma_req = get_rdma_req(req);
struct spdk_nvmf_conn *conn = req->conn; struct spdk_nvmf_qpair *qpair = req->qpair;
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn); struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
if (req->xfer == SPDK_NVME_DATA_NONE) { if (req->xfer == SPDK_NVME_DATA_NONE) {
/* If no data transfer, this can bypass the queue */ /* If no data transfer, this can bypass the queue */
return request_transfer_out(req); return request_transfer_out(req);
} }
if (rdma_conn->cur_rdma_rw_depth < rdma_conn->max_rw_depth) { if (rdma_qpair->cur_rdma_rw_depth < rdma_qpair->max_rw_depth) {
if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
return request_transfer_out(req); return request_transfer_out(req);
} else if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { } else if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
return request_transfer_in(req); return request_transfer_in(req);
} }
} else { } else {
TAILQ_INSERT_TAIL(&rdma_conn->pending_rdma_rw_queue, rdma_req, link); TAILQ_INSERT_TAIL(&rdma_qpair->pending_rdma_rw_queue, rdma_req, link);
} }
return 0; return 0;
@ -535,7 +536,7 @@ spdk_nvmf_rdma_request_transfer_data(struct spdk_nvmf_request *req)
static int static int
nvmf_rdma_connect(struct rdma_cm_event *event) nvmf_rdma_connect(struct rdma_cm_event *event)
{ {
struct spdk_nvmf_rdma_conn *rdma_conn = NULL; struct spdk_nvmf_rdma_qpair *rdma_qpair = NULL;
struct spdk_nvmf_rdma_listen_addr *addr; struct spdk_nvmf_rdma_listen_addr *addr;
struct rdma_conn_param *rdma_param = NULL; struct rdma_conn_param *rdma_param = NULL;
struct rdma_conn_param ctrlr_event_data; struct rdma_conn_param ctrlr_event_data;
@ -612,9 +613,9 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
max_queue_depth, max_rw_depth); max_queue_depth, max_rw_depth);
/* Init the NVMf rdma transport connection */ /* Init the NVMf rdma transport connection */
rdma_conn = spdk_nvmf_rdma_conn_create(event->id, addr->comp_channel, max_queue_depth, rdma_qpair = spdk_nvmf_rdma_qpair_create(event->id, addr->comp_channel, max_queue_depth,
max_rw_depth, subsystem_id); max_rw_depth, subsystem_id);
if (rdma_conn == NULL) { if (rdma_qpair == NULL) {
SPDK_ERRLOG("Error on nvmf connection creation\n"); SPDK_ERRLOG("Error on nvmf connection creation\n");
goto err1; goto err1;
} }
@ -638,12 +639,12 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
/* Add this RDMA connection to the global list until a CONNECT capsule /* Add this RDMA connection to the global list until a CONNECT capsule
* is received. */ * is received. */
TAILQ_INSERT_TAIL(&g_pending_conns, rdma_conn, link); TAILQ_INSERT_TAIL(&g_pending_conns, rdma_qpair, link);
return 0; return 0;
err2: err2:
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
err1: { err1: {
struct spdk_nvmf_rdma_reject_private_data rej_data; struct spdk_nvmf_rdma_reject_private_data rej_data;
@ -658,38 +659,38 @@ err0:
static int static int
nvmf_rdma_disconnect(struct rdma_cm_event *evt) nvmf_rdma_disconnect(struct rdma_cm_event *evt)
{ {
struct spdk_nvmf_conn *conn; struct spdk_nvmf_qpair *qpair;
struct spdk_nvmf_ctrlr *ctrlr; struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_subsystem *subsystem; struct spdk_nvmf_subsystem *subsystem;
struct spdk_nvmf_rdma_conn *rdma_conn; struct spdk_nvmf_rdma_qpair *rdma_qpair;
if (evt->id == NULL) { if (evt->id == NULL) {
SPDK_ERRLOG("disconnect request: missing cm_id\n"); SPDK_ERRLOG("disconnect request: missing cm_id\n");
return -1; return -1;
} }
conn = evt->id->context; qpair = evt->id->context;
if (conn == NULL) { if (qpair == NULL) {
SPDK_ERRLOG("disconnect request: no active connection\n"); SPDK_ERRLOG("disconnect request: no active connection\n");
return -1; return -1;
} }
/* ack the disconnect event before rdma_destroy_id */ /* ack the disconnect event before rdma_destroy_id */
rdma_ack_cm_event(evt); rdma_ack_cm_event(evt);
rdma_conn = get_rdma_conn(conn); rdma_qpair = get_rdma_qpair(qpair);
ctrlr = conn->ctrlr; ctrlr = qpair->ctrlr;
if (ctrlr == NULL) { if (ctrlr == NULL) {
/* No ctrlr has been established yet. That means the conn /* No ctrlr has been established yet. That means the qpair
* must be in the pending connections list. Remove it. */ * must be in the pending connections list. Remove it. */
TAILQ_REMOVE(&g_pending_conns, rdma_conn, link); TAILQ_REMOVE(&g_pending_conns, rdma_qpair, link);
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
return 0; return 0;
} }
subsystem = ctrlr->subsys; subsystem = ctrlr->subsys;
subsystem->disconnect_cb(subsystem->cb_ctx, conn); subsystem->disconnect_cb(subsystem->cb_ctx, qpair);
return 0; return 0;
} }
@ -738,7 +739,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
req->xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype); req->xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
} else { } else {
req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc); req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
if ((req->conn->type == CONN_TYPE_AQ) && if ((req->qpair->type == QPAIR_TYPE_AQ) &&
((cmd->opc == SPDK_NVME_OPC_GET_FEATURES) || ((cmd->opc == SPDK_NVME_OPC_GET_FEATURES) ||
(cmd->opc == SPDK_NVME_OPC_SET_FEATURES))) { (cmd->opc == SPDK_NVME_OPC_SET_FEATURES))) {
switch (cmd->cdw10 & 0xff) { switch (cmd->cdw10 & 0xff) {
@ -779,7 +780,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key; rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
rdma_req->data.wr.wr.rdma.remote_addr = sgl->address; rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
rdma_ctrlr = get_rdma_ctrlr(req->conn->ctrlr); rdma_ctrlr = get_rdma_ctrlr(req->qpair->ctrlr);
if (!rdma_ctrlr) { if (!rdma_ctrlr) {
/* The only time a connection won't have a ctrlr /* The only time a connection won't have a ctrlr
* is when this is the CONNECT request. * is when this is the CONNECT request.
@ -791,7 +792,7 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
/* Use the in capsule data buffer, even though this isn't in capsule data. */ /* Use the in capsule data buffer, even though this isn't in capsule data. */
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request using in capsule buffer for non-capsule data\n"); SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request using in capsule buffer for non-capsule data\n");
req->data = rdma_req->recv->buf; req->data = rdma_req->recv->buf;
rdma_req->data.sgl[0].lkey = get_rdma_conn(req->conn)->bufs_mr->lkey; rdma_req->data.sgl[0].lkey = get_rdma_qpair(req->qpair)->bufs_mr->lkey;
rdma_req->data_from_pool = false; rdma_req->data_from_pool = false;
} else { } else {
req->data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool); req->data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
@ -857,18 +858,18 @@ spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req)
} }
static int static int
spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_conn *conn) spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_qpair *qpair)
{ {
struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn); struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr; struct spdk_nvmf_rdma_ctrlr *rdma_ctrlr;
struct spdk_nvmf_rdma_request *rdma_req, *tmp; struct spdk_nvmf_rdma_request *rdma_req, *tmp;
int rc; int rc;
int count = 0; int count = 0;
/* First, try to assign free data buffers to requests that need one */ /* First, try to assign free data buffers to requests that need one */
if (conn->ctrlr) { if (qpair->ctrlr) {
rdma_ctrlr = get_rdma_ctrlr(conn->ctrlr); rdma_ctrlr = get_rdma_ctrlr(qpair->ctrlr);
TAILQ_FOREACH_SAFE(rdma_req, &rdma_conn->pending_data_buf_queue, link, tmp) { TAILQ_FOREACH_SAFE(rdma_req, &rdma_qpair->pending_data_buf_queue, link, tmp) {
assert(rdma_req->req.data == NULL); assert(rdma_req->req.data == NULL);
rdma_req->req.data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool); rdma_req->req.data = SLIST_FIRST(&rdma_ctrlr->data_buf_pool);
if (!rdma_req->req.data) { if (!rdma_req->req.data) {
@ -876,9 +877,9 @@ spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_conn *conn)
} }
SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link); SLIST_REMOVE_HEAD(&rdma_ctrlr->data_buf_pool, link);
rdma_req->data.sgl[0].addr = (uintptr_t)rdma_req->req.data; rdma_req->data.sgl[0].addr = (uintptr_t)rdma_req->req.data;
TAILQ_REMOVE(&rdma_conn->pending_data_buf_queue, rdma_req, link); TAILQ_REMOVE(&rdma_qpair->pending_data_buf_queue, rdma_req, link);
if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
TAILQ_INSERT_TAIL(&rdma_conn->pending_rdma_rw_queue, rdma_req, link); TAILQ_INSERT_TAIL(&rdma_qpair->pending_rdma_rw_queue, rdma_req, link);
} else { } else {
rc = spdk_nvmf_request_exec(&rdma_req->req); rc = spdk_nvmf_request_exec(&rdma_req->req);
if (rc < 0) { if (rc < 0) {
@ -890,13 +891,13 @@ spdk_nvmf_rdma_handle_pending_rdma_rw(struct spdk_nvmf_conn *conn)
} }
/* Try to initiate RDMA Reads or Writes on requests that have data buffers */ /* Try to initiate RDMA Reads or Writes on requests that have data buffers */
while (rdma_conn->cur_rdma_rw_depth < rdma_conn->max_rw_depth) { while (rdma_qpair->cur_rdma_rw_depth < rdma_qpair->max_rw_depth) {
rdma_req = TAILQ_FIRST(&rdma_conn->pending_rdma_rw_queue); rdma_req = TAILQ_FIRST(&rdma_qpair->pending_rdma_rw_queue);
if (spdk_unlikely(!rdma_req)) { if (spdk_unlikely(!rdma_req)) {
break; break;
} }
TAILQ_REMOVE(&rdma_conn->pending_rdma_rw_queue, rdma_req, link); TAILQ_REMOVE(&rdma_qpair->pending_rdma_rw_queue, rdma_req, link);
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Submitting previously queued for RDMA R/W request %p\n", rdma_req); SPDK_TRACELOG(SPDK_TRACE_RDMA, "Submitting previously queued for RDMA R/W request %p\n", rdma_req);
@ -990,7 +991,7 @@ spdk_nvmf_rdma_listen_remove(struct spdk_nvmf_listen_addr *listen_addr)
} }
static int static int
spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn); spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair);
static void static void
spdk_nvmf_rdma_addr_listen_init(struct spdk_nvmf_rdma_listen_addr *addr) spdk_nvmf_rdma_addr_listen_init(struct spdk_nvmf_rdma_listen_addr *addr)
@ -1020,7 +1021,7 @@ spdk_nvmf_rdma_acceptor_poll(void)
{ {
struct rdma_cm_event *event; struct rdma_cm_event *event;
int rc; int rc;
struct spdk_nvmf_rdma_conn *rdma_conn, *tmp; struct spdk_nvmf_rdma_qpair *rdma_qpair, *tmp;
struct spdk_nvmf_rdma_listen_addr *addr = NULL, *addr_tmp; struct spdk_nvmf_rdma_listen_addr *addr = NULL, *addr_tmp;
if (g_rdma.event_channel == NULL) { if (g_rdma.event_channel == NULL) {
@ -1037,15 +1038,15 @@ spdk_nvmf_rdma_acceptor_poll(void)
/* Process pending connections for incoming capsules. The only capsule /* Process pending connections for incoming capsules. The only capsule
* this should ever find is a CONNECT request. */ * this should ever find is a CONNECT request. */
TAILQ_FOREACH_SAFE(rdma_conn, &g_pending_conns, link, tmp) { TAILQ_FOREACH_SAFE(rdma_qpair, &g_pending_conns, link, tmp) {
rc = spdk_nvmf_rdma_poll(&rdma_conn->conn); rc = spdk_nvmf_rdma_poll(&rdma_qpair->qpair);
if (rc < 0) { if (rc < 0) {
TAILQ_REMOVE(&g_pending_conns, rdma_conn, link); TAILQ_REMOVE(&g_pending_conns, rdma_qpair, link);
spdk_nvmf_rdma_conn_destroy(rdma_conn); spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
} else if (rc > 0) { } else if (rc > 0) {
/* At least one request was processed which is assumed to be /* At least one request was processed which is assumed to be
* a CONNECT. Remove this connection from our list. */ * a CONNECT. Remove this connection from our list. */
TAILQ_REMOVE(&g_pending_conns, rdma_conn, link); TAILQ_REMOVE(&g_pending_conns, rdma_qpair, link);
} }
} }
@@ -1241,14 +1242,14 @@ spdk_nvmf_rdma_ctrlr_fini(struct spdk_nvmf_ctrlr *ctrlr)
 }
 
 static int
-spdk_nvmf_rdma_ctrlr_add_conn(struct spdk_nvmf_ctrlr *ctrlr,
-			      struct spdk_nvmf_conn *conn)
+spdk_nvmf_rdma_ctrlr_add_qpair(struct spdk_nvmf_ctrlr *ctrlr,
+			       struct spdk_nvmf_qpair *qpair)
 {
 	struct spdk_nvmf_rdma_ctrlr	*rdma_ctrlr = get_rdma_ctrlr(ctrlr);
-	struct spdk_nvmf_rdma_conn	*rdma_conn = get_rdma_conn(conn);
+	struct spdk_nvmf_rdma_qpair	*rdma_qpair = get_rdma_qpair(qpair);
 
 	if (rdma_ctrlr->verbs != NULL) {
-		if (rdma_ctrlr->verbs != rdma_conn->cm_id->verbs) {
+		if (rdma_ctrlr->verbs != rdma_qpair->cm_id->verbs) {
 			SPDK_ERRLOG("Two connections belonging to the same ctrlr cannot connect using different RDMA devices.\n");
 			return -1;
 		}
@@ -1257,8 +1258,8 @@ spdk_nvmf_rdma_ctrlr_add_conn(struct spdk_nvmf_ctrlr *ctrlr,
 		return 0;
 	}
 
-	rdma_ctrlr->verbs = rdma_conn->cm_id->verbs;
-	rdma_ctrlr->buf_mr = ibv_reg_mr(rdma_conn->cm_id->pd, rdma_ctrlr->buf,
+	rdma_ctrlr->verbs = rdma_qpair->cm_id->verbs;
+	rdma_ctrlr->buf_mr = ibv_reg_mr(rdma_qpair->cm_id->pd, rdma_ctrlr->buf,
 					g_rdma.max_queue_depth * g_rdma.max_io_size,
 					IBV_ACCESS_LOCAL_WRITE |
 					IBV_ACCESS_REMOTE_WRITE);
@@ -1277,8 +1278,8 @@ spdk_nvmf_rdma_ctrlr_add_conn(struct spdk_nvmf_ctrlr *ctrlr,
 }
 
 static int
-spdk_nvmf_rdma_ctrlr_remove_conn(struct spdk_nvmf_ctrlr *ctrlr,
-				 struct spdk_nvmf_conn *conn)
+spdk_nvmf_rdma_ctrlr_remove_qpair(struct spdk_nvmf_ctrlr *ctrlr,
+				  struct spdk_nvmf_qpair *qpair)
 {
 	return 0;
 }
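ctrlr_add_qpair shows a lazy one-time-setup pattern: the first qpair pays the cost of registering the controller's data buffers (a memory region is tied to one device's protection domain), and every later qpair only validates that it arrived on the same device. A standalone sketch of that guard, with hypothetical names standing in for the verbs handle and ibv_reg_mr():

    #include <stdio.h>

    struct device { int id; };

    struct controller {
        struct device *dev;     /* NULL until the first qpair attaches */
        int registered;         /* stand-in for a registered MR */
    };

    static int attach_qpair(struct controller *c, struct device *d)
    {
        if (c->dev != NULL) {
            if (c->dev != d) {
                fprintf(stderr, "qpair on a different device rejected\n");
                return -1;
            }
            return 0;           /* fast path: already set up for this device */
        }
        c->dev = d;             /* first qpair: record device ... */
        c->registered = 1;      /* ... and do the one-time registration */
        return 0;
    }

    int main(void)
    {
        struct device a = {1}, b = {2};
        struct controller c = {0};

        printf("%d\n", attach_qpair(&c, &a));   /* 0: registers */
        printf("%d\n", attach_qpair(&c, &a));   /* 0: fast path */
        printf("%d\n", attach_qpair(&c, &b));   /* -1: device mismatch */
        return 0;
    }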
@@ -1303,13 +1304,13 @@ static void
 request_release_buffer(struct spdk_nvmf_request *req)
 {
 	struct spdk_nvmf_rdma_request	*rdma_req = get_rdma_req(req);
-	struct spdk_nvmf_conn		*conn = req->conn;
+	struct spdk_nvmf_qpair		*qpair = req->qpair;
 	struct spdk_nvmf_rdma_ctrlr	*rdma_ctrlr;
 	struct spdk_nvmf_rdma_buf	*buf;
 
 	if (rdma_req->data_from_pool) {
 		/* Put the buffer back in the pool */
-		rdma_ctrlr = get_rdma_ctrlr(conn->ctrlr);
+		rdma_ctrlr = get_rdma_ctrlr(qpair->ctrlr);
 		buf = req->data;
 		SLIST_INSERT_HEAD(&rdma_ctrlr->data_buf_pool, buf, link);
@@ -1320,13 +1321,13 @@ request_release_buffer(struct spdk_nvmf_request *req)
 }
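data_buf_pool is a singly-linked free list: allocation pops the head, release pushes the buffer back, both O(1) with no allocator calls on the hot path. A minimal standalone sketch of that shape (not SPDK code):

    #include <stdio.h>
    #include <sys/queue.h>

    struct buf {
        char data[128];
        SLIST_ENTRY(buf) link;
    };

    SLIST_HEAD(buf_pool, buf);

    static struct buf *pool_get(struct buf_pool *p)
    {
        struct buf *b = SLIST_FIRST(p);
        if (b) {
            SLIST_REMOVE_HEAD(p, link);
        }
        return b;   /* NULL means the caller must queue and retry later */
    }

    static void pool_put(struct buf_pool *p, struct buf *b)
    {
        SLIST_INSERT_HEAD(p, b, link);
    }

    int main(void)
    {
        static struct buf bufs[4];
        struct buf_pool pool = SLIST_HEAD_INITIALIZER(pool);

        for (int i = 0; i < 4; i++) {
            pool_put(&pool, &bufs[i]);
        }
        struct buf *b = pool_get(&pool);
        printf("got %p\n", (void *)b);
        pool_put(&pool, b);     /* what request_release_buffer() does */
        return 0;
    }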
 static void
-spdk_nvmf_rdma_close_conn(struct spdk_nvmf_conn *conn)
+spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
 {
-	spdk_nvmf_rdma_conn_destroy(get_rdma_conn(conn));
+	spdk_nvmf_rdma_qpair_destroy(get_rdma_qpair(qpair));
 }
 
 static int
-process_incoming_queue(struct spdk_nvmf_rdma_conn *rdma_conn)
+process_incoming_queue(struct spdk_nvmf_rdma_qpair *rdma_qpair)
 {
 	struct spdk_nvmf_rdma_recv	*rdma_recv, *tmp;
 	struct spdk_nvmf_rdma_request	*rdma_req;
@@ -1335,14 +1336,14 @@ process_incoming_queue(struct spdk_nvmf_rdma_conn *rdma_conn)
 	bool error = false;
 
 	count = 0;
-	TAILQ_FOREACH_SAFE(rdma_recv, &rdma_conn->incoming_queue, link, tmp) {
-		rdma_req = TAILQ_FIRST(&rdma_conn->free_queue);
+	TAILQ_FOREACH_SAFE(rdma_recv, &rdma_qpair->incoming_queue, link, tmp) {
+		rdma_req = TAILQ_FIRST(&rdma_qpair->free_queue);
 		if (rdma_req == NULL) {
 			/* Need to wait for more SEND completions */
 			break;
 		}
-		TAILQ_REMOVE(&rdma_conn->free_queue, rdma_req, link);
-		TAILQ_REMOVE(&rdma_conn->incoming_queue, rdma_recv, link);
+		TAILQ_REMOVE(&rdma_qpair->free_queue, rdma_req, link);
+		TAILQ_REMOVE(&rdma_qpair->incoming_queue, rdma_recv, link);
 		rdma_req->recv = rdma_recv;
 		req = &rdma_req->req;
@@ -1366,7 +1367,7 @@ process_incoming_queue(struct spdk_nvmf_rdma_conn *rdma_conn)
 			break;
 		case SPDK_NVMF_REQUEST_PREP_PENDING_BUFFER:
 			SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request %p needs data buffer\n", req);
-			TAILQ_INSERT_TAIL(&rdma_conn->pending_data_buf_queue, rdma_req, link);
+			TAILQ_INSERT_TAIL(&rdma_qpair->pending_data_buf_queue, rdma_req, link);
 			break;
 		case SPDK_NVMF_REQUEST_PREP_PENDING_DATA:
 			SPDK_TRACELOG(SPDK_TRACE_RDMA, "Request %p needs data transfer\n", req);
@@ -1390,21 +1391,21 @@ process_incoming_queue(struct spdk_nvmf_rdma_conn *rdma_conn)
 }
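The switch inside process_incoming_queue routes each freshly prepped request by the outcome of preparation: execute now, park it until a data buffer frees up, or park it until the RDMA transfer brings its data in. A tiny standalone sketch of that routing (hypothetical enum and queue names, not SPDK's actual states):

    #include <stdio.h>

    enum prep_status {
        PREP_READY,             /* may execute immediately */
        PREP_PENDING_BUFFER,    /* wait for a pooled data buffer */
        PREP_PENDING_DATA,      /* wait for an RDMA READ to land */
        PREP_ERROR,
    };

    static const char *route(enum prep_status st)
    {
        switch (st) {
        case PREP_READY:
            return "execute";
        case PREP_PENDING_BUFFER:
            return "pending_data_buf_queue";
        case PREP_PENDING_DATA:
            return "pending_rdma_rw_queue";
        default:
            return "fail request";
        }
    }

    int main(void)
    {
        printf("%s\n", route(PREP_PENDING_BUFFER));
        return 0;
    }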
 static struct spdk_nvmf_rdma_request *
-get_rdma_req_from_wc(struct spdk_nvmf_rdma_conn *rdma_conn,
+get_rdma_req_from_wc(struct spdk_nvmf_rdma_qpair *rdma_qpair,
 		     struct ibv_wc *wc)
 {
 	struct spdk_nvmf_rdma_request *rdma_req;
 
 	rdma_req = (struct spdk_nvmf_rdma_request *)wc->wr_id;
 	assert(rdma_req != NULL);
-	assert(rdma_req - rdma_conn->reqs >= 0);
-	assert(rdma_req - rdma_conn->reqs < (ptrdiff_t)rdma_conn->max_queue_depth);
+	assert(rdma_req - rdma_qpair->reqs >= 0);
+	assert(rdma_req - rdma_qpair->reqs < (ptrdiff_t)rdma_qpair->max_queue_depth);
 
 	return rdma_req;
 }
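The request pointer rides in the 64-bit wr_id cookie posted with the work request, and the pointer-difference asserts cheaply verify on completion that it still points into this qpair's request array. A standalone sketch of that round trip (hypothetical types):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct request { int opcode; };

    struct queue {
        struct request reqs[16];
        size_t max_depth;
    };

    static struct request *req_from_cookie(struct queue *q, uint64_t wr_id)
    {
        struct request *r = (struct request *)(uintptr_t)wr_id;

        assert(r != NULL);
        assert(r - q->reqs >= 0);                       /* not below the array */
        assert(r - q->reqs < (ptrdiff_t)q->max_depth);  /* not past the end */
        return r;
    }

    int main(void)
    {
        struct queue q = { .max_depth = 16 };
        /* Stored in the work request when it was posted... */
        uint64_t cookie = (uint64_t)(uintptr_t)&q.reqs[3];

        /* ...and recovered when the completion arrives. */
        struct request *r = req_from_cookie(&q, cookie);
        printf("index %td\n", r - q.reqs);
        return 0;
    }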
 static struct spdk_nvmf_rdma_recv *
-get_rdma_recv_from_wc(struct spdk_nvmf_rdma_conn *rdma_conn,
+get_rdma_recv_from_wc(struct spdk_nvmf_rdma_qpair *rdma_qpair,
 		      struct ibv_wc *wc)
 {
 	struct spdk_nvmf_rdma_recv *rdma_recv;
@@ -1413,8 +1414,8 @@ get_rdma_recv_from_wc(struct spdk_nvmf_rdma_conn *rdma_conn,
 	rdma_recv = (struct spdk_nvmf_rdma_recv *)wc->wr_id;
 	assert(rdma_recv != NULL);
-	assert(rdma_recv - rdma_conn->recvs >= 0);
-	assert(rdma_recv - rdma_conn->recvs < (ptrdiff_t)rdma_conn->max_queue_depth);
+	assert(rdma_recv - rdma_qpair->recvs >= 0);
+	assert(rdma_recv - rdma_qpair->recvs < (ptrdiff_t)rdma_qpair->max_queue_depth);
 #ifdef DEBUG
 	assert(rdma_recv->in_use == false);
 	rdma_recv->in_use = true;
@@ -1427,10 +1428,10 @@ get_rdma_recv_from_wc(struct spdk_nvmf_rdma_conn *rdma_conn,
  * or -1 on error.
  */
 static int
-spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
+spdk_nvmf_rdma_poll(struct spdk_nvmf_qpair *qpair)
 {
 	struct ibv_wc wc[32];
-	struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn);
+	struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
 	struct spdk_nvmf_rdma_request *rdma_req;
 	struct spdk_nvmf_rdma_recv *rdma_recv;
 	struct spdk_nvmf_request *req;
@@ -1439,7 +1440,7 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 	bool error = false;
 
 	/* Poll for completing operations. */
-	rc = ibv_poll_cq(rdma_conn->cq, 32, wc);
+	rc = ibv_poll_cq(rdma_qpair->cq, 32, wc);
 	if (rc < 0) {
 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
 			    errno, strerror(errno));
@@ -1450,30 +1451,30 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 	for (i = 0; i < reaped; i++) {
 		if (wc[i].status) {
 			SPDK_ERRLOG("CQ error on Connection %p, Request 0x%lu (%d): %s\n",
-				    conn, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
+				    qpair, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
 			error = true;
 			continue;
 		}
 
 		switch (wc[i].opcode) {
 		case IBV_WC_SEND:
-			rdma_req = get_rdma_req_from_wc(rdma_conn, &wc[i]);
+			rdma_req = get_rdma_req_from_wc(rdma_qpair, &wc[i]);
 			req = &rdma_req->req;
 
-			assert(rdma_conn->cur_queue_depth > 0);
+			assert(rdma_qpair->cur_queue_depth > 0);
 			SPDK_TRACELOG(SPDK_TRACE_RDMA,
 				      "RDMA SEND Complete. Request: %p Connection: %p Outstanding I/O: %d\n",
-				      req, conn, rdma_conn->cur_queue_depth - 1);
-			rdma_conn->cur_queue_depth--;
+				      req, qpair, rdma_qpair->cur_queue_depth - 1);
+			rdma_qpair->cur_queue_depth--;
 
 			/* The request may still own a data buffer. Release it */
 			request_release_buffer(req);
 
 			/* Put the request back on the free list */
-			TAILQ_INSERT_TAIL(&rdma_conn->free_queue, rdma_req, link);
+			TAILQ_INSERT_TAIL(&rdma_qpair->free_queue, rdma_req, link);
 
 			/* Try to process queued incoming requests */
-			rc = process_incoming_queue(rdma_conn);
+			rc = process_incoming_queue(rdma_qpair);
 			if (rc < 0) {
 				error = true;
 				continue;
@@ -1482,20 +1483,20 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 			break;
 
 		case IBV_WC_RDMA_WRITE:
-			rdma_req = get_rdma_req_from_wc(rdma_conn, &wc[i]);
+			rdma_req = get_rdma_req_from_wc(rdma_qpair, &wc[i]);
 			req = &rdma_req->req;
 			SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA WRITE Complete. Request: %p Connection: %p\n",
-				      req, conn);
+				      req, qpair);
 			spdk_trace_record(TRACE_RDMA_WRITE_COMPLETE, 0, 0, (uint64_t)req, 0);
 
 			/* Now that the write has completed, the data buffer can be released */
 			request_release_buffer(req);
-			rdma_conn->cur_rdma_rw_depth--;
+			rdma_qpair->cur_rdma_rw_depth--;
 
 			/* Since an RDMA R/W operation completed, try to submit from the pending list. */
-			rc = spdk_nvmf_rdma_handle_pending_rdma_rw(conn);
+			rc = spdk_nvmf_rdma_handle_pending_rdma_rw(qpair);
 			if (rc < 0) {
 				error = true;
 				continue;
@@ -1504,11 +1505,11 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 			break;
 
 		case IBV_WC_RDMA_READ:
-			rdma_req = get_rdma_req_from_wc(rdma_conn, &wc[i]);
+			rdma_req = get_rdma_req_from_wc(rdma_qpair, &wc[i]);
 			req = &rdma_req->req;
 			SPDK_TRACELOG(SPDK_TRACE_RDMA, "RDMA READ Complete. Request: %p Connection: %p\n",
-				      req, conn);
+				      req, qpair);
 			spdk_trace_record(TRACE_RDMA_READ_COMPLETE, 0, 0, (uint64_t)req, 0);
 			rc = spdk_nvmf_request_exec(req);
 			if (rc) {
@@ -1518,8 +1519,8 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 			count++;
 
 			/* Since an RDMA R/W operation completed, try to submit from the pending list. */
-			rdma_conn->cur_rdma_rw_depth--;
-			rc = spdk_nvmf_rdma_handle_pending_rdma_rw(conn);
+			rdma_qpair->cur_rdma_rw_depth--;
+			rc = spdk_nvmf_rdma_handle_pending_rdma_rw(qpair);
 			if (rc < 0) {
 				error = true;
 				continue;
@@ -1528,20 +1529,20 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 			break;
 
 		case IBV_WC_RECV:
-			rdma_recv = get_rdma_recv_from_wc(rdma_conn, &wc[i]);
+			rdma_recv = get_rdma_recv_from_wc(rdma_qpair, &wc[i]);
 
-			rdma_conn->cur_queue_depth++;
-			if (rdma_conn->cur_queue_depth > rdma_conn->max_queue_depth) {
+			rdma_qpair->cur_queue_depth++;
+			if (rdma_qpair->cur_queue_depth > rdma_qpair->max_queue_depth) {
 				SPDK_TRACELOG(SPDK_TRACE_RDMA,
 					      "Temporarily exceeded maximum queue depth (%u). Queueing.\n",
-					      rdma_conn->cur_queue_depth);
+					      rdma_qpair->cur_queue_depth);
 			}
 			SPDK_TRACELOG(SPDK_TRACE_RDMA,
 				      "RDMA RECV Complete. Recv: %p Connection: %p Outstanding I/O: %d\n",
-				      rdma_recv, conn, rdma_conn->cur_queue_depth);
+				      rdma_recv, qpair, rdma_qpair->cur_queue_depth);
-			TAILQ_INSERT_TAIL(&rdma_conn->incoming_queue, rdma_recv, link);
+			TAILQ_INSERT_TAIL(&rdma_qpair->incoming_queue, rdma_recv, link);
 
-			rc = process_incoming_queue(rdma_conn);
+			rc = process_incoming_queue(rdma_qpair);
 			if (rc < 0) {
 				error = true;
 				continue;
@@ -1564,11 +1565,11 @@ spdk_nvmf_rdma_poll(struct spdk_nvmf_conn *conn)
 }
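The poll path above is a classic batched completion loop: reap up to 32 completions per call to amortize the poll cost, skip errored entries but keep draining the batch, and dispatch the rest by opcode. A standalone sketch of that shape with mock types (no libibverbs; poll_cq() stands in for ibv_poll_cq()):

    #include <stdbool.h>
    #include <stdio.h>

    enum wc_opcode { WC_SEND, WC_RDMA_WRITE, WC_RDMA_READ, WC_RECV };

    struct completion {
        int status;             /* nonzero means the work request failed */
        enum wc_opcode opcode;
    };

    /* Stand-in for ibv_poll_cq(): report two fake completions. */
    static int poll_cq(struct completion *wc, int max)
    {
        wc[0] = (struct completion){0, WC_RECV};
        wc[1] = (struct completion){0, WC_SEND};
        return max < 2 ? max : 2;
    }

    int main(void)
    {
        struct completion wc[32];
        bool error = false;
        int reaped = poll_cq(wc, 32);

        for (int i = 0; i < reaped; i++) {
            if (wc[i].status) {
                error = true;   /* log and keep draining the batch */
                continue;
            }
            switch (wc[i].opcode) {
            case WC_SEND:
                puts("response sent: recycle request slot");
                break;
            case WC_RDMA_WRITE:
                puts("data pushed to host: release buffer");
                break;
            case WC_RDMA_READ:
                puts("data pulled from host: execute command");
                break;
            case WC_RECV:
                puts("new capsule: try to start a request");
                break;
            }
        }
        return error ? 1 : 0;
    }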
 static bool
-spdk_nvmf_rdma_conn_is_idle(struct spdk_nvmf_conn *conn)
+spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
 {
-	struct spdk_nvmf_rdma_conn *rdma_conn = get_rdma_conn(conn);
+	struct spdk_nvmf_rdma_qpair *rdma_qpair = get_rdma_qpair(qpair);
 
-	if (rdma_conn->cur_queue_depth == 0 && rdma_conn->cur_rdma_rw_depth == 0) {
+	if (rdma_qpair->cur_queue_depth == 0 && rdma_qpair->cur_rdma_rw_depth == 0) {
 		return true;
 	}
 	return false;
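get_rdma_qpair() itself is not part of this diff, but accessors like it are typically built on the contained-struct idiom: the generic struct is embedded inside the transport-specific one, so recovering the wrapper is a cast. A standalone sketch under that assumption (hypothetical layout, not SPDK's definition):

    #include <stdio.h>

    struct nvmf_qpair {                 /* generic, transport-agnostic part */
        int type;
    };

    struct rdma_qpair {                 /* transport-specific wrapper */
        struct nvmf_qpair qpair;        /* must stay the first member */
        int cur_queue_depth;
        int cur_rdma_rw_depth;
    };

    static struct rdma_qpair *get_rdma_qpair(struct nvmf_qpair *q)
    {
        /* Valid because "qpair" is the first member of rdma_qpair. */
        return (struct rdma_qpair *)q;
    }

    static int qpair_is_idle(struct nvmf_qpair *q)
    {
        struct rdma_qpair *rq = get_rdma_qpair(q);

        return rq->cur_queue_depth == 0 && rq->cur_rdma_rw_depth == 0;
    }

    int main(void)
    {
        struct rdma_qpair rq = { .qpair = {0} };

        printf("idle: %d\n", qpair_is_idle(&rq.qpair));     /* 1 */
        return 0;
    }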
@@ -1587,14 +1588,14 @@ const struct spdk_nvmf_transport spdk_nvmf_transport_rdma = {
 	.ctrlr_init = spdk_nvmf_rdma_ctrlr_init,
 	.ctrlr_fini = spdk_nvmf_rdma_ctrlr_fini,
-	.ctrlr_add_conn = spdk_nvmf_rdma_ctrlr_add_conn,
-	.ctrlr_remove_conn = spdk_nvmf_rdma_ctrlr_remove_conn,
+	.ctrlr_add_qpair = spdk_nvmf_rdma_ctrlr_add_qpair,
+	.ctrlr_remove_qpair = spdk_nvmf_rdma_ctrlr_remove_qpair,
 	.req_complete = spdk_nvmf_rdma_request_complete,
-	.conn_fini = spdk_nvmf_rdma_close_conn,
-	.conn_poll = spdk_nvmf_rdma_poll,
-	.conn_is_idle = spdk_nvmf_rdma_conn_is_idle,
+	.qpair_fini = spdk_nvmf_rdma_close_qpair,
+	.qpair_poll = spdk_nvmf_rdma_poll,
+	.qpair_is_idle = spdk_nvmf_rdma_qpair_is_idle,
 };
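spdk_nvmf_transport_rdma is a const table of function pointers: the core calls through the table and never needs to know which transport backs a given qpair. A minimal standalone sketch of that pattern (hypothetical names):

    #include <stdio.h>

    struct qpair;       /* opaque to the core */

    struct transport_ops {
        int  (*qpair_poll)(struct qpair *q);
        void (*qpair_fini)(struct qpair *q);
    };

    static int rdma_poll(struct qpair *q) { (void)q; return 0; }
    static void rdma_fini(struct qpair *q) { (void)q; }

    static const struct transport_ops rdma_transport = {
        .qpair_poll = rdma_poll,
        .qpair_fini = rdma_fini,
    };

    /* The core's view: pick a table, call through it. */
    static void drive(const struct transport_ops *ops, struct qpair *q)
    {
        if (ops->qpair_poll(q) < 0) {
            ops->qpair_fini(q);
        }
    }

    int main(void)
    {
        drive(&rdma_transport, NULL);
        puts("polled via ops table");
        return 0;
    }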

View File

@@ -59,7 +59,7 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
 		      response->cid, response->cdw0, response->rsvd1,
 		      *(uint16_t *)&response->status);
 
-	if (req->conn->transport->req_complete(req)) {
+	if (req->qpair->transport->req_complete(req)) {
 		SPDK_ERRLOG("Transport request completion error!\n");
 		return -1;
 	}
@@ -76,7 +76,7 @@ nvmf_process_property_get(struct spdk_nvmf_request *req)
 	cmd = &req->cmd->prop_get_cmd;
 	response = &req->rsp->prop_get_rsp;
 
-	spdk_nvmf_property_get(req->conn->ctrlr, cmd, response);
+	spdk_nvmf_property_get(req->qpair->ctrlr, cmd, response);
 
 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 }
@@ -88,7 +88,7 @@ nvmf_process_property_set(struct spdk_nvmf_request *req)
 	cmd = &req->cmd->prop_set_cmd;
 
-	spdk_nvmf_property_set(req->conn->ctrlr, cmd, &req->rsp->nvme_cpl);
+	spdk_nvmf_property_set(req->qpair->ctrlr, cmd, &req->rsp->nvme_cpl);
 
 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 }
@@ -100,9 +100,9 @@ spdk_nvmf_handle_connect(struct spdk_nvmf_request *req)
 	struct spdk_nvmf_fabric_connect_data *connect_data = (struct spdk_nvmf_fabric_connect_data *)
 			req->data;
 	struct spdk_nvmf_fabric_connect_rsp *response = &req->rsp->connect_rsp;
-	struct spdk_nvmf_conn *conn = req->conn;
+	struct spdk_nvmf_qpair *qpair = req->qpair;
 
-	spdk_nvmf_ctrlr_connect(conn, connect, connect_data, response);
+	spdk_nvmf_ctrlr_connect(qpair, connect, connect_data, response);
 
 	SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
 		      response->status_code_specific.success.cntlid);
@@ -182,12 +182,12 @@ nvmf_process_connect(struct spdk_nvmf_request *req)
 static spdk_nvmf_request_exec_status
 nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_conn *conn = req->conn;
+	struct spdk_nvmf_qpair *qpair = req->qpair;
 	struct spdk_nvmf_capsule_cmd *cap_hdr;
 
 	cap_hdr = &req->cmd->nvmf_cmd;
 
-	if (conn->ctrlr == NULL) {
+	if (qpair->ctrlr == NULL) {
 		/* No ctrlr established yet; the only valid command is Connect */
 		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
 			return nvmf_process_connect(req);
@@ -197,7 +197,7 @@ nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 		}
-	} else if (conn->type == CONN_TYPE_AQ) {
+	} else if (qpair->type == QPAIR_TYPE_AQ) {
 		/*
 		 * Controller session is established, and this is an admin queue.
 		 * Disallow Connect and allow other fabrics commands.
@@ -223,7 +223,7 @@ nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
 }
 
 static void
-nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
+nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum spdk_nvmf_qpair_type qpair_type)
 {
 	struct spdk_nvmf_capsule_cmd *cap_hdr = &h2c_msg->nvmf_cmd;
 	struct spdk_nvme_cmd *cmd = &h2c_msg->nvme_cmd;
@@ -233,12 +233,12 @@ nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
 	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
 		opc = cap_hdr->fctype;
 		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s Fabrics cmd: fctype 0x%02x cid %u\n",
-			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
+			      qpair_type == QPAIR_TYPE_AQ ? "Admin" : "I/O",
 			      cap_hdr->fctype, cap_hdr->cid);
 	} else {
 		opc = cmd->opc;
 		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s cmd: opc 0x%02x fuse %u cid %u nsid %u cdw10 0x%08x\n",
-			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
+			      qpair_type == QPAIR_TYPE_AQ ? "Admin" : "I/O",
 			      cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10);
 		if (cmd->mptr) {
 			SPDK_TRACELOG(SPDK_TRACE_NVMF, "mptr 0x%" PRIx64 "\n", cmd->mptr);
@@ -269,12 +269,12 @@ nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
 int
 spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_ctrlr *ctrlr = req->conn->ctrlr;
+	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
 	spdk_nvmf_request_exec_status status;
 
-	nvmf_trace_command(req->cmd, req->conn->type);
+	nvmf_trace_command(req->cmd, req->qpair->type);
 
 	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
 		status = nvmf_process_fabrics_command(req);
@@ -292,7 +292,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
 	if (subsystem->is_removed) {
 		rsp->status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
 		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
-	} else if (req->conn->type == CONN_TYPE_AQ) {
+	} else if (req->qpair->type == QPAIR_TYPE_AQ) {
 		status = subsystem->ops->process_admin_cmd(req);
 	} else {
 		status = subsystem->ops->process_io_cmd(req);
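Taken together, the request path gates and routes commands in three steps: before a controller is associated only CONNECT is legal, after that fabrics commands are filtered by queue type, and plain NVMe commands go to the admin or I/O handler depending on the qpair. A standalone miniature of that logic (hypothetical names and status strings, not SPDK's API):

    #include <stdio.h>

    enum qpair_type { QPAIR_TYPE_AQ, QPAIR_TYPE_IOQ };
    enum cmd { CMD_CONNECT, CMD_PROPERTY_GET, CMD_READ };

    struct qpair {
        void *ctrlr;            /* NULL until CONNECT succeeds */
        enum qpair_type type;
    };

    static const char *dispatch(struct qpair *q, enum cmd c)
    {
        if (q->ctrlr == NULL) {
            /* No controller yet: only CONNECT may proceed. */
            return c == CMD_CONNECT ? "connect" : "command sequence error";
        }
        if (c == CMD_CONNECT) {
            return "command sequence error";     /* already connected */
        }
        return q->type == QPAIR_TYPE_AQ ? "admin handler" : "io handler";
    }

    int main(void)
    {
        struct qpair q = { .ctrlr = NULL, .type = QPAIR_TYPE_AQ };

        printf("%s\n", dispatch(&q, CMD_PROPERTY_GET));  /* error */
        printf("%s\n", dispatch(&q, CMD_CONNECT));       /* connect */
        q.ctrlr = &q;                                    /* pretend connected */
        printf("%s\n", dispatch(&q, CMD_READ));          /* admin handler */
        return 0;
    }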

View File

@@ -59,7 +59,7 @@ union nvmf_c2h_msg {
 SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");
 
 struct spdk_nvmf_request {
-	struct spdk_nvmf_conn		*conn;
+	struct spdk_nvmf_qpair		*qpair;
 	uint32_t			length;
 	enum spdk_nvme_data_transfer	xfer;
 	void				*data;

View File

@@ -112,13 +112,13 @@ spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem)
 static bool
 nvmf_subsystem_removable(struct spdk_nvmf_subsystem *subsystem)
 {
 	struct spdk_nvmf_ctrlr	*ctrlr;
-	struct spdk_nvmf_conn	*conn;
+	struct spdk_nvmf_qpair	*qpair;
 
 	if (subsystem->is_removed) {
 		TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
-			TAILQ_FOREACH(conn, &ctrlr->connections, link) {
-				if (!conn->transport->conn_is_idle(conn)) {
+			TAILQ_FOREACH(qpair, &ctrlr->qpairs, link) {
+				if (!qpair->transport->qpair_is_idle(qpair)) {
 					return false;
 				}
 			}
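The removable check is a quiesce predicate: a subsystem may be torn down only once every qpair of every controller reports idle, so the scan bails out on the first busy qpair. A standalone sketch using plain arrays in place of the linked lists (hypothetical types):

    #include <stdbool.h>
    #include <stdio.h>

    struct qpair { bool idle; };

    struct ctrlr {
        struct qpair qpairs[2];
        int nqpairs;
    };

    static bool subsystem_removable(struct ctrlr *ctrlrs, int nctrlrs)
    {
        for (int c = 0; c < nctrlrs; c++) {
            for (int q = 0; q < ctrlrs[c].nqpairs; q++) {
                if (!ctrlrs[c].qpairs[q].idle) {
                    return false;   /* keep polling until drained */
                }
            }
        }
        return true;
    }

    int main(void)
    {
        struct ctrlr cs[1] = {{ .qpairs = {{true}, {false}}, .nqpairs = 2 }};

        printf("%d\n", subsystem_removable(cs, 1));     /* 0: one qpair busy */
        cs[0].qpairs[1].idle = true;
        printf("%d\n", subsystem_removable(cs, 1));     /* 1 */
        return 0;
    }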

View File

@@ -92,14 +92,14 @@ struct spdk_nvmf_transport {
 	void (*ctrlr_fini)(struct spdk_nvmf_ctrlr *ctrlr);
 
 	/**
-	 * Add a connection to a ctrlr
+	 * Add a qpair to a ctrlr
 	 */
-	int (*ctrlr_add_conn)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn);
+	int (*ctrlr_add_qpair)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_qpair *qpair);
 
 	/**
-	 * Remove a connection from a ctrlr
+	 * Remove a qpair from a ctrlr
 	 */
-	int (*ctrlr_remove_conn)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_conn *conn);
+	int (*ctrlr_remove_qpair)(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvmf_qpair *qpair);
 
 	/*
 	 * Signal request completion, which sends a response
@@ -110,17 +110,17 @@ struct spdk_nvmf_transport {
 	/*
 	 * Deinitialize a connection.
 	 */
-	void (*conn_fini)(struct spdk_nvmf_conn *conn);
+	void (*qpair_fini)(struct spdk_nvmf_qpair *qpair);
 
 	/*
 	 * Poll a connection for events.
 	 */
-	int (*conn_poll)(struct spdk_nvmf_conn *conn);
+	int (*qpair_poll)(struct spdk_nvmf_qpair *qpair);
 
 	/*
-	 * True if the conn has no pending IO.
+	 * True if the qpair has no pending IO.
 	 */
-	bool (*conn_is_idle)(struct spdk_nvmf_conn *conn);
+	bool (*qpair_is_idle)(struct spdk_nvmf_qpair *qpair);
 };
 
 int spdk_nvmf_transport_init(void);

View File

@@ -40,14 +40,14 @@
 SPDK_LOG_REGISTER_TRACE_FLAG("nvmf", SPDK_TRACE_NVMF)
 
-struct spdk_nvmf_conn *
-spdk_nvmf_ctrlr_get_conn(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
+struct spdk_nvmf_qpair *
+spdk_nvmf_ctrlr_get_qpair(struct spdk_nvmf_ctrlr *ctrlr, uint16_t qid)
 {
 	return NULL;
 }
 
 struct spdk_nvmf_request *
-spdk_nvmf_conn_get_request(struct spdk_nvmf_conn *conn, uint16_t cid)
+spdk_nvmf_qpair_get_request(struct spdk_nvmf_qpair *qpair, uint16_t cid)
 {
 	return NULL;
 }

View File

@@ -149,14 +149,14 @@ test_process_discovery_cmd(void)
 	int ret;
 	/* random request length value for testing */
 	int req_length = 122;
-	struct spdk_nvmf_conn req_conn = {};
+	struct spdk_nvmf_qpair req_qpair = {};
 	struct spdk_nvmf_ctrlr req_ctrlr = {};
 	struct spdk_nvme_ctrlr_data req_data = {};
 	struct spdk_nvmf_discovery_log_page req_page = {};
 	union nvmf_h2c_msg req_cmd = {};
 	union nvmf_c2h_msg req_rsp = {};
 
-	req.conn = &req_conn;
+	req.qpair = &req_qpair;
 	req.cmd = &req_cmd;
 	req.rsp = &req_rsp;
@@ -168,7 +168,7 @@ test_process_discovery_cmd(void)
 	/* IDENTIFY opcode return value check */
 	req.cmd->nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 	req.cmd->nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
-	req.conn->ctrlr = &req_ctrlr;
+	req.qpair->ctrlr = &req_ctrlr;
 	req.data = &req_data;
 	ret = nvmf_discovery_ctrlr_process_admin_cmd(&req);
 	CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_SUCCESS);

View File

@@ -45,7 +45,7 @@ void spdk_trace_record(uint16_t tpoint_id, uint16_t poller_id, uint32_t size,
 }
 
 void
-spdk_nvmf_ctrlr_connect(struct spdk_nvmf_conn *conn,
+spdk_nvmf_ctrlr_connect(struct spdk_nvmf_qpair *qpair,
 			struct spdk_nvmf_fabric_connect_cmd *cmd,
 			struct spdk_nvmf_fabric_connect_data *data,
 			struct spdk_nvmf_fabric_connect_rsp *rsp)
@@ -97,7 +97,7 @@ struct spdk_nvme_ns *spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint3
 }
 
 void
-spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_conn *conn)
+spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair)
 {
 }
@@ -138,14 +138,14 @@ test_nvmf_process_fabrics_cmd(void)
 {
 	struct spdk_nvmf_request req = {};
 	int ret;
-	struct spdk_nvmf_conn req_conn = {};
+	struct spdk_nvmf_qpair req_qpair = {};
 	union nvmf_h2c_msg req_cmd = {};
 	union nvmf_c2h_msg req_rsp = {};
 
-	req.conn = &req_conn;
+	req.qpair = &req_qpair;
 	req.cmd = &req_cmd;
 	req.rsp = &req_rsp;
-	req.conn->ctrlr = NULL;
+	req.qpair->ctrlr = NULL;
 
 	/* No ctrlr and invalid command check */
 	req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
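These unit tests all follow the same fixture pattern: stack-allocated, zero-initialized stubs stand in for the qpair and message buffers, and the ctrlr pointer is left NULL to drive the "only CONNECT is legal" path. A standalone miniature of that pattern with plain assert in place of CUnit (hypothetical types and status codes, not SPDK's):

    #include <assert.h>
    #include <stdio.h>

    struct qpair { void *ctrlr; };
    struct request {
        struct qpair *qpair;
        int fctype;
        int status;
    };

    enum { FCTYPE_CONNECT = 0x01, FCTYPE_PROPERTY_GET = 0x04 };
    enum { SC_SUCCESS = 0, SC_COMMAND_SEQUENCE_ERROR = 0x0c };

    static void process_fabrics_cmd(struct request *req)
    {
        if (req->qpair->ctrlr == NULL && req->fctype != FCTYPE_CONNECT) {
            req->status = SC_COMMAND_SEQUENCE_ERROR;
            return;
        }
        req->status = SC_SUCCESS;
    }

    int main(void)
    {
        struct qpair req_qpair = {0};                   /* no ctrlr yet */
        struct request req = { .qpair = &req_qpair };

        /* No ctrlr and a non-CONNECT command must be rejected. */
        req.fctype = FCTYPE_PROPERTY_GET;
        process_fabrics_cmd(&req);
        assert(req.status == SC_COMMAND_SEQUENCE_ERROR);

        puts("ok");
        return 0;
    }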