nvmf: Remove poll group from controller
Now rely entirely on the user to create and poll the poll groups.

Change-Id: I66baaa2d0f493390a055a32e6c902f5e2f574534
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/385954
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Parent: 2302dc6cdc
Commit: 8b79ef3372
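With this change, the nvmf library no longer creates a poll group per controller; the application creates the poll groups, polls them, and decides where each newly accepted qpair goes. The hunks below show how the nvmf_tgt application was updated to do this. As a rough sketch of the caller-side contract only (the names g_tgt, g_group, my_new_qpair, and my_acceptor_poll are illustrative and not part of this commit), a minimal single-threaded user of the new API could look like:

#include <stdio.h>
#include "spdk/nvmf.h"

/* Illustrative globals; a real application would typically keep one poll
 * group per core or per thread, as nvmf_tgt does below. */
static struct spdk_nvmf_tgt *g_tgt;
static struct spdk_nvmf_poll_group *g_group;	/* from spdk_nvmf_poll_group_create(g_tgt) */

/* Callback handed to spdk_nvmf_tgt_accept(); invoked once per new qpair. */
static void
my_new_qpair(struct spdk_nvmf_qpair *qpair)
{
	/* Placement is now the application's job: attach the qpair to a
	 * poll group, which polls it from then on. */
	if (spdk_nvmf_poll_group_add(g_group, qpair) != 0) {
		fprintf(stderr, "failed to add qpair to poll group\n");
	}
}

/* Called periodically (e.g. from an spdk_poller) to discover connections. */
static void
my_acceptor_poll(void *arg)
{
	(void)arg;
	spdk_nvmf_tgt_accept(g_tgt, my_new_qpair);
}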
@@ -141,12 +141,41 @@ nvmf_tgt_shutdown_subsystem_by_nqn(const char *nqn)
 	return -1;
 }
 
+static void
+nvmf_tgt_poll_group_add(void *arg1, void *arg2)
+{
+	struct spdk_nvmf_qpair *qpair = arg1;
+	struct nvmf_tgt_poll_group *pg = arg2;
+
+	spdk_nvmf_poll_group_add(pg->group, qpair);
+}
+
+static void
+new_qpair(struct spdk_nvmf_qpair *qpair)
+{
+	struct spdk_event *event;
+	struct nvmf_tgt_poll_group *pg;
+	uint32_t core;
+
+	core = g_tgt.core;
+	g_tgt.core = spdk_env_get_next_core(core);
+	if (g_tgt.core == UINT32_MAX) {
+		g_tgt.core = spdk_env_get_first_core();
+	}
+
+	pg = &g_poll_groups[core];
+	assert(pg != NULL);
+
+	event = spdk_event_allocate(core, nvmf_tgt_poll_group_add, qpair, pg);
+	spdk_event_call(event);
+}
+
 static void
 acceptor_poll(void *arg)
 {
 	struct spdk_nvmf_tgt *tgt = arg;
 
-	spdk_nvmf_tgt_accept(tgt);
+	spdk_nvmf_tgt_accept(tgt, new_qpair);
 }
 
 static void
@@ -221,6 +250,8 @@ nvmf_tgt_advance_state(void *arg1, void *arg2)
 			rc = -ENOMEM;
 			break;
 		}
+
+		g_tgt.core = spdk_env_get_first_core();
 		break;
 	}
 	case NVMF_TGT_INIT_PARSE_CONFIG:
@@ -77,6 +77,8 @@ struct nvmf_tgt {
 	enum nvmf_tgt_state state;
 
 	struct spdk_nvmf_tgt *tgt;
+
+	uint32_t core; /* Round-robin tracking of cores for qpair assignment */
 };
 
 extern struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf;
@@ -99,10 +99,16 @@ void spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt);
 int spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
 			 struct spdk_nvme_transport_id *trid);
 
+typedef void (*new_qpair_fn)(struct spdk_nvmf_qpair *qpair);
+
 /**
  * Poll the target for incoming connections.
+ *
+ * The new_qpair_fn cb_fn will be called for each newly discovered
+ * qpair. The user is expected to add that qpair to a poll group
+ * to establish the connection.
  */
-void spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt);
+void spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn);
 
 /**
  * Create a poll group.
@@ -126,7 +132,6 @@ int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
 int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
 				struct spdk_nvmf_qpair *qpair);
 
-
 /*
  * The NVMf subsystem, as indicated in the specification, is a collection
  * of controllers. Any individual controller has
@@ -73,13 +73,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 		return NULL;
 	}
 
-	ctrlr->group = spdk_nvmf_poll_group_create(subsystem->tgt);
-	if (ctrlr->group == NULL) {
-		SPDK_ERRLOG("spdk_nvmf_transport_poll_group_create() failed\n");
-		free(ctrlr);
-		return NULL;
-	}
-
 	TAILQ_INIT(&ctrlr->qpairs);
 	ctrlr->kato = connect_cmd->kato;
 	ctrlr->async_event_config.raw = 0;
@@ -89,12 +82,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 
 	memcpy(ctrlr->hostid, connect_data->hostid, sizeof(ctrlr->hostid));
 
-	if (spdk_nvmf_poll_group_add(ctrlr->group, admin_qpair)) {
-		spdk_nvmf_poll_group_destroy(ctrlr->group);
-		free(ctrlr);
-		return NULL;
-	}
-
 	ctrlr->vcprop.cap.raw = 0;
 	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
 	ctrlr->vcprop.cap.bits.mqes = tgt->opts.max_queue_depth - 1; /* max queue depth */
@@ -123,7 +110,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 
 	if (spdk_nvmf_subsystem_add_ctrlr(subsystem, ctrlr)) {
 		SPDK_ERRLOG("Unable to add controller to subsystem\n");
-		spdk_nvmf_poll_group_destroy(ctrlr->group);
 		free(ctrlr);
 		return NULL;
 	}
@@ -134,7 +120,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
 static void ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
 {
 	spdk_nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);
-	spdk_nvmf_poll_group_destroy(ctrlr->group);
 	free(ctrlr);
 }
 
@@ -324,11 +309,6 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 			rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 		}
-
-		if (spdk_nvmf_poll_group_add(ctrlr->group, qpair)) {
-			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
-			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
-		}
 	}
 
 	ctrlr->num_qpairs++;
@@ -351,7 +331,6 @@ spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair)
 	ctrlr->num_qpairs--;
 	TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
 
-	spdk_nvmf_poll_group_remove(ctrlr->group, qpair);
 	spdk_nvmf_transport_qpair_fini(qpair);
 
 	if (ctrlr->num_qpairs == 0) {
@@ -322,12 +322,12 @@ spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_
 }
 
 void
-spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt)
+spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn)
 {
 	struct spdk_nvmf_transport *transport, *tmp;
 
 	TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
-		spdk_nvmf_transport_accept(transport);
+		spdk_nvmf_transport_accept(transport, cb_fn);
 	}
 }
 
@@ -178,7 +178,6 @@ struct spdk_nvmf_ctrlr {
 	} async_event_config;
 	struct spdk_nvmf_request *aer_req;
 	uint8_t hostid[16];
-	struct spdk_nvmf_poll_group *group;
 
 	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
 };
lib/nvmf/rdma.c (147 changed lines)
@@ -147,6 +147,7 @@ struct spdk_nvmf_rdma_qpair {
 	struct spdk_nvmf_qpair			qpair;
 
 	struct spdk_nvmf_rdma_port		*port;
+	struct spdk_nvmf_rdma_poller		*poller;
 
 	struct rdma_cm_id			*cm_id;
 	struct ibv_cq				*cq;
@@ -208,9 +209,6 @@ struct spdk_nvmf_rdma_qpair {
 	struct spdk_thread			*thread;
 };
 
-/* List of RDMA connections that have not yet received a CONNECT capsule */
-static TAILQ_HEAD(, spdk_nvmf_rdma_qpair) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns);
-
 struct spdk_nvmf_rdma_poller {
 	struct spdk_nvmf_rdma_device		*device;
 	struct spdk_nvmf_rdma_poll_group	*group;
@@ -286,24 +284,13 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
 	}
 }
 
-static int
-spdk_nvmf_rdma_qpair_allocate_channel(struct spdk_nvmf_rdma_qpair *rqpair,
-				      struct spdk_nvmf_rdma_transport *rtransport)
-{
-	rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
-	if (!rqpair->mgmt_channel) {
-		return -1;
-	}
-
-	rqpair->thread = spdk_get_thread();
-	rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
-	assert(rqpair->ch != NULL);
-	return 0;
-}
-
 static void
 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 {
+	if (rqpair->poller) {
+		TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
+	}
+
 	if (rqpair->cmds_mr) {
 		ibv_dereg_mr(rqpair->cmds_mr);
 	}
@@ -616,7 +603,8 @@ spdk_nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport
 }
 
 static int
-nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event)
+nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event,
+		  new_qpair_fn cb_fn)
 {
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
@@ -625,7 +613,6 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
 	uint16_t	max_queue_depth;
 	uint16_t	max_rw_depth;
-	int rc;
 
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
 
@@ -711,26 +698,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 
 	event->id->context = &rqpair->qpair;
 
-	spdk_nvmf_rdma_qpair_initialize(&rqpair->qpair);
-
-	rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
-	if (rc) {
-		/* Try to reject, but we probably can't */
-		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
-		return -1;
-	}
-
-	/* Add this RDMA connection to the global list until a CONNECT capsule
-	 * is received. */
-	TAILQ_INSERT_TAIL(&g_pending_conns, rqpair, pending_link);
-
-	rc = spdk_nvmf_rdma_qpair_allocate_channel(rqpair, rtransport);
-	if (rc) {
-		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
-		return -1;
-	}
+	cb_fn(&rqpair->qpair);
 
 	return 0;
 }
@@ -738,7 +706,20 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 static void
 nvmf_rdma_handle_disconnect(void *ctx)
 {
 	struct spdk_nvmf_qpair		*qpair = ctx;
+	struct spdk_nvmf_ctrlr		*ctrlr;
+	struct spdk_nvmf_rdma_qpair	*rqpair;
+
+	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+
+	ctrlr = qpair->ctrlr;
+	if (ctrlr == NULL) {
+		/* No ctrlr has been established yet, so destroy
+		 * the connection.
+		 */
+		spdk_nvmf_rdma_qpair_destroy(rqpair);
+		return;
+	}
+
 	spdk_nvmf_ctrlr_disconnect(qpair);
 }
@@ -746,11 +727,8 @@ nvmf_rdma_handle_disconnect(void *ctx)
 static int
 nvmf_rdma_disconnect(struct rdma_cm_event *evt)
 {
 	struct spdk_nvmf_qpair		*qpair;
-	struct spdk_nvmf_ctrlr		*ctrlr;
-	struct spdk_nvmf_rdma_qpair	*rqpair;
-	struct spdk_nvmf_rdma_qpair	*r, *t;
-	struct spdk_io_channel		*ch;
+	struct spdk_io_channel		*ch;
 
 	if (evt->id == NULL) {
 		SPDK_ERRLOG("disconnect request: missing cm_id\n");
@@ -765,29 +743,6 @@ nvmf_rdma_disconnect(struct rdma_cm_event *evt)
 	/* ack the disconnect event before rdma_destroy_id */
 	rdma_ack_cm_event(evt);
 
-	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
-	/* The connection may still be in this pending list when a disconnect
-	 * event arrives. Search for it and remove it if it is found.
-	 */
-	TAILQ_FOREACH_SAFE(r, &g_pending_conns, pending_link, t) {
-		if (r == rqpair) {
-			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Received disconnect for qpair %p before first SEND ack\n",
-				      rqpair);
-			TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
-			break;
-		}
-	}
-
-	ctrlr = qpair->ctrlr;
-	if (ctrlr == NULL) {
-		/* No ctrlr has been established yet, so destroy
-		 * the connection immediately.
-		 */
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
-		return 0;
-	}
-
 	ch = spdk_io_channel_from_ctx(qpair->group);
 	spdk_thread_send_msg(spdk_io_channel_get_thread(ch), nvmf_rdma_handle_disconnect, qpair);
 
@@ -1443,12 +1398,11 @@ spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
 			 struct spdk_nvmf_rdma_qpair *rqpair);
 
 static void
-spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
+spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
 {
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct rdma_cm_event		*event;
 	int				rc;
-	struct spdk_nvmf_rdma_qpair	*rqpair, *tmp;
 	char buf[64];
 
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
@@ -1457,22 +1411,6 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
 		return;
 	}
 
-	/* Process pending connections for incoming capsules. The only capsule
-	 * this should ever find is a CONNECT request. */
-	TAILQ_FOREACH_SAFE(rqpair, &g_pending_conns, pending_link, tmp) {
-		rc = spdk_nvmf_rdma_qpair_poll(rtransport, rqpair);
-		if (rc < 0) {
-			TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
-			spdk_nvmf_rdma_qpair_destroy(rqpair);
-		} else if (rc > 0) {
-			spdk_put_io_channel(rqpair->mgmt_channel);
-			rqpair->mgmt_channel = NULL;
-			/* At least one request was processed which is assumed to be
-			 * a CONNECT. Remove this connection from our list. */
-			TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
-		}
-	}
-
 	while (1) {
 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
 		if (rc == 0) {
@@ -1480,7 +1418,7 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
 
 			switch (event->event) {
 			case RDMA_CM_EVENT_CONNECT_REQUEST:
-				rc = nvmf_rdma_connect(transport, event);
+				rc = nvmf_rdma_connect(transport, event, cb_fn);
 				if (rc < 0) {
 					SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
 					break;
@@ -1604,11 +1542,14 @@ static int
 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 			      struct spdk_nvmf_qpair *qpair)
 {
+	struct spdk_nvmf_rdma_transport		*rtransport;
 	struct spdk_nvmf_rdma_poll_group	*rgroup;
 	struct spdk_nvmf_rdma_qpair		*rqpair;
 	struct spdk_nvmf_rdma_device		*device;
 	struct spdk_nvmf_rdma_poller		*poller;
+	int rc;
 
+	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
@@ -1631,6 +1572,27 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 	}
 
 	TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link);
+	rqpair->poller = poller;
+
+	spdk_nvmf_rdma_qpair_initialize(qpair);
+
+	rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
+	if (!rqpair->mgmt_channel) {
+		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
+		spdk_nvmf_rdma_qpair_destroy(rqpair);
+		return -1;
+	}
+
+	rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
+	assert(rqpair->ch != NULL);
+
+	rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
+	if (rc) {
+		/* Try to reject, but we probably can't */
+		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
+		spdk_nvmf_rdma_qpair_destroy(rqpair);
+		return -1;
+	}
+
 	return 0;
 }
@@ -1774,15 +1736,6 @@ spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
 	bool error = false;
 	char buf[64];
 
-	/* reset the mgmt_channel and thread info of qpair */
-	if (rqpair->mgmt_channel != NULL) {
-		if (rqpair->thread != spdk_get_thread()) {
-			return 0;
-		}
-	} else if (spdk_nvmf_rdma_qpair_allocate_channel(rqpair, rtransport)) {
-		return -1;
-	}
-
 	/* Poll for completing operations. */
 	reaped = ibv_poll_cq(rqpair->cq, 32, wc);
 	if (reaped < 0) {
@@ -69,9 +69,8 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
 {
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 
-	if ((cmd->opc == SPDK_NVME_OPC_FABRIC ||
-	     req->qpair->type == QPAIR_TYPE_AQ) &&
-	    req->qpair->group) {
+	if (cmd->opc == SPDK_NVME_OPC_FABRIC ||
+	    req->qpair->type == QPAIR_TYPE_AQ) {
 		struct spdk_io_channel *ch;
 
 		ch = spdk_io_channel_from_ctx(req->qpair->group);
@@ -104,9 +104,9 @@ spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
 }
 
 void
-spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport)
+spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
 {
-	transport->ops->accept(transport);
+	transport->ops->accept(transport, cb_fn);
 }
 
 void
@@ -78,7 +78,7 @@ struct spdk_nvmf_transport_ops {
 	/**
 	 * Check for new connections on the transport.
 	 */
-	void (*accept)(struct spdk_nvmf_transport *transport);
+	void (*accept)(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn);
 
 	/**
 	 * Fill out a discovery log entry for a specific listen address.
@@ -141,7 +141,7 @@ int spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
 int spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
 				    const struct spdk_nvme_transport_id *trid);
 
-void spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport);
+void spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn);
 
 void spdk_nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
 					   struct spdk_nvme_transport_id *trid,
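On the transport side, the accept operation now receives the callback and is expected to invoke it once for each new qpair it discovers, rather than wiring the qpair up itself (the RDMA transport's nvmf_rdma_connect() above now simply calls cb_fn(&rqpair->qpair)). As a hedged illustration of that contract for a hypothetical transport (my_transport_accept and my_transport_poll_for_new_qpair are invented names, not SPDK symbols):

#include "spdk/nvmf.h"

/* Hypothetical transport-private helper that returns the next pending
 * connection as a qpair, or NULL when none are waiting. */
struct spdk_nvmf_qpair *my_transport_poll_for_new_qpair(struct spdk_nvmf_transport *transport);

static void
my_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
{
	struct spdk_nvmf_qpair *qpair;

	/* Hand every newly discovered qpair to the caller-supplied callback;
	 * the callback, not the transport, chooses the poll group. */
	while ((qpair = my_transport_poll_for_new_qpair(transport)) != NULL) {
		cb_fn(qpair);
	}
}

A transport would then point the accept member of its spdk_nvmf_transport_ops at such a function, matching the updated signature above.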
@@ -152,10 +152,6 @@ int spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
 	return -1;
 }
 
-void spdk_bdev_close(struct spdk_bdev_desc *desc)
-{
-}
-
 const char *spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem)
 {
 	return NULL;