nvmf: Remove poll group from controller

Now rely entirely on the user to create and poll
the poll groups.

Change-Id: I66baaa2d0f493390a055a32e6c902f5e2f574534
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/385954
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker 2017-08-30 09:36:33 -07:00
parent 2302dc6cdc
commit 8b79ef3372
11 changed files with 99 additions and 135 deletions
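The user-facing effect of this change: the library no longer creates a poll group per controller, so the application must create poll groups itself and pass a callback to spdk_nvmf_tgt_accept() that places each new qpair. A minimal single-group sketch of the new contract, assuming one reactor thread (tgt_init, new_qpair_cb, and acceptor_poll are illustrative names, not part of this commit):

#include <stdio.h>

#include "spdk/nvmf.h"

/* One poll group, polled from a single thread. Real applications, like
 * app/nvmf_tgt below, create one group per core and dispatch each qpair
 * to the owning core. */
static struct spdk_nvmf_poll_group *g_group;

static int
tgt_init(struct spdk_nvmf_tgt *tgt)
{
	g_group = spdk_nvmf_poll_group_create(tgt);

	return g_group != NULL ? 0 : -1;
}

/* new_qpair_fn callback: invoked by spdk_nvmf_tgt_accept() for each newly
 * discovered qpair. Adding it to a poll group is now the user's job. */
static void
new_qpair_cb(struct spdk_nvmf_qpair *qpair)
{
	if (spdk_nvmf_poll_group_add(g_group, qpair) != 0) {
		fprintf(stderr, "failed to add qpair to poll group\n");
	}
}

/* Registered as a periodic poller; the target only reports new
 * connections, it no longer places them. */
static void
acceptor_poll(void *arg)
{
	struct spdk_nvmf_tgt *tgt = arg;

	spdk_nvmf_tgt_accept(tgt, new_qpair_cb);
}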

View File

@@ -141,12 +141,41 @@ nvmf_tgt_shutdown_subsystem_by_nqn(const char *nqn)
return -1;
}
static void
nvmf_tgt_poll_group_add(void *arg1, void *arg2)
{
struct spdk_nvmf_qpair *qpair = arg1;
struct nvmf_tgt_poll_group *pg = arg2;
spdk_nvmf_poll_group_add(pg->group, qpair);
}
static void
new_qpair(struct spdk_nvmf_qpair *qpair)
{
struct spdk_event *event;
struct nvmf_tgt_poll_group *pg;
uint32_t core;
core = g_tgt.core;
g_tgt.core = spdk_env_get_next_core(core);
if (g_tgt.core == UINT32_MAX) {
g_tgt.core = spdk_env_get_first_core();
}
pg = &g_poll_groups[core];
assert(pg != NULL);
event = spdk_event_allocate(core, nvmf_tgt_poll_group_add, qpair, pg);
spdk_event_call(event);
}
static void
acceptor_poll(void *arg)
{
struct spdk_nvmf_tgt *tgt = arg;
spdk_nvmf_tgt_accept(tgt);
spdk_nvmf_tgt_accept(tgt, new_qpair);
}
static void
@@ -221,6 +250,8 @@ nvmf_tgt_advance_state(void *arg1, void *arg2)
rc = -ENOMEM;
break;
}
g_tgt.core = spdk_env_get_first_core();
break;
}
case NVMF_TGT_INIT_PARSE_CONFIG:

View File

@@ -77,6 +77,8 @@ struct nvmf_tgt {
enum nvmf_tgt_state state;
struct spdk_nvmf_tgt *tgt;
uint32_t core; /* Round-robin tracking of cores for qpair assignment */
};
extern struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf;

View File

@@ -99,10 +99,16 @@ void spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt);
int spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
struct spdk_nvme_transport_id *trid);
typedef void (*new_qpair_fn)(struct spdk_nvmf_qpair *qpair);
/**
* Poll the target for incoming connections.
*
* The new_qpair_fn cb_fn will be called for each newly discovered
* qpair. The user is expected to add that qpair to a poll group
* to establish the connection.
*/
void spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt);
void spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn);
/**
* Create a poll group.
@@ -126,7 +132,6 @@ int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair);
/*
* The NVMf subsystem, as indicated in the specification, is a collection
* of controllers. Any individual controller has

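Because a poll group is polled from one specific thread, the usual pattern for the callback described in the doc comment above is to defer spdk_nvmf_poll_group_add() to the core that owns the chosen group, as the app/nvmf_tgt changes at the top of this commit do with an event. A hedged sketch, where choose_core() and group_for_core() stand in for application-specific bookkeeping:

#include "spdk/event.h"
#include "spdk/nvmf.h"

/* Hypothetical helpers supplied by the application. */
extern uint32_t choose_core(void);
extern struct spdk_nvmf_poll_group *group_for_core(uint32_t core);

/* Runs on the core that owns the poll group. */
static void
poll_group_add_event(void *arg1, void *arg2)
{
	struct spdk_nvmf_qpair *qpair = arg1;
	struct spdk_nvmf_poll_group *group = arg2;

	spdk_nvmf_poll_group_add(group, qpair);
}

/* new_qpair_fn callback: hand the qpair off to the owning core rather
 * than adding it from the acceptor's thread. */
static void
new_qpair_cb(struct spdk_nvmf_qpair *qpair)
{
	uint32_t core = choose_core();
	struct spdk_event *event;

	event = spdk_event_allocate(core, poll_group_add_event,
				    qpair, group_for_core(core));
	spdk_event_call(event);
}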
View File

@@ -73,13 +73,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
return NULL;
}
ctrlr->group = spdk_nvmf_poll_group_create(subsystem->tgt);
if (ctrlr->group == NULL) {
SPDK_ERRLOG("spdk_nvmf_transport_poll_group_create() failed\n");
free(ctrlr);
return NULL;
}
TAILQ_INIT(&ctrlr->qpairs);
ctrlr->kato = connect_cmd->kato;
ctrlr->async_event_config.raw = 0;
@@ -89,12 +82,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
memcpy(ctrlr->hostid, connect_data->hostid, sizeof(ctrlr->hostid));
if (spdk_nvmf_poll_group_add(ctrlr->group, admin_qpair)) {
spdk_nvmf_poll_group_destroy(ctrlr->group);
free(ctrlr);
return NULL;
}
ctrlr->vcprop.cap.raw = 0;
ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
ctrlr->vcprop.cap.bits.mqes = tgt->opts.max_queue_depth - 1; /* max queue depth */
@@ -123,7 +110,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
if (spdk_nvmf_subsystem_add_ctrlr(subsystem, ctrlr)) {
SPDK_ERRLOG("Unable to add controller to subsystem\n");
spdk_nvmf_poll_group_destroy(ctrlr->group);
free(ctrlr);
return NULL;
}
@@ -134,7 +120,6 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
static void ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
spdk_nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);
spdk_nvmf_poll_group_destroy(ctrlr->group);
free(ctrlr);
}
@@ -324,11 +309,6 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (spdk_nvmf_poll_group_add(ctrlr->group, qpair)) {
SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
}
ctrlr->num_qpairs++;
@@ -351,7 +331,6 @@ spdk_nvmf_ctrlr_disconnect(struct spdk_nvmf_qpair *qpair)
ctrlr->num_qpairs--;
TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
spdk_nvmf_poll_group_remove(ctrlr->group, qpair);
spdk_nvmf_transport_qpair_fini(qpair);
if (ctrlr->num_qpairs == 0) {

View File

@@ -322,12 +322,12 @@ spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_
}
void
spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt)
spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn)
{
struct spdk_nvmf_transport *transport, *tmp;
TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
spdk_nvmf_transport_accept(transport);
spdk_nvmf_transport_accept(transport, cb_fn);
}
}

View File

@@ -178,7 +178,6 @@ struct spdk_nvmf_ctrlr {
} async_event_config;
struct spdk_nvmf_request *aer_req;
uint8_t hostid[16];
struct spdk_nvmf_poll_group *group;
TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};

View File

@@ -147,6 +147,7 @@ struct spdk_nvmf_rdma_qpair {
struct spdk_nvmf_qpair qpair;
struct spdk_nvmf_rdma_port *port;
struct spdk_nvmf_rdma_poller *poller;
struct rdma_cm_id *cm_id;
struct ibv_cq *cq;
@@ -208,9 +209,6 @@ struct spdk_nvmf_rdma_qpair {
struct spdk_thread *thread;
};
/* List of RDMA connections that have not yet received a CONNECT capsule */
static TAILQ_HEAD(, spdk_nvmf_rdma_qpair) g_pending_conns = TAILQ_HEAD_INITIALIZER(g_pending_conns);
struct spdk_nvmf_rdma_poller {
struct spdk_nvmf_rdma_device *device;
struct spdk_nvmf_rdma_poll_group *group;
@@ -286,24 +284,13 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
}
}
static int
spdk_nvmf_rdma_qpair_allocate_channel(struct spdk_nvmf_rdma_qpair *rqpair,
struct spdk_nvmf_rdma_transport *rtransport)
{
rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
if (!rqpair->mgmt_channel) {
return -1;
}
rqpair->thread = spdk_get_thread();
rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
assert(rqpair->ch != NULL);
return 0;
}
static void
spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
{
if (rqpair->poller) {
TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
}
if (rqpair->cmds_mr) {
ibv_dereg_mr(rqpair->cmds_mr);
}
@@ -616,7 +603,8 @@ spdk_nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport
}
static int
nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event)
nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event,
new_qpair_fn cb_fn)
{
struct spdk_nvmf_rdma_transport *rtransport;
struct spdk_nvmf_rdma_qpair *rqpair = NULL;
@@ -625,7 +613,6 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
uint16_t max_queue_depth;
uint16_t max_rw_depth;
int rc;
rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
@@ -711,26 +698,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
event->id->context = &rqpair->qpair;
spdk_nvmf_rdma_qpair_initialize(&rqpair->qpair);
rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
if (rc) {
/* Try to reject, but we probably can't */
spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
spdk_nvmf_rdma_qpair_destroy(rqpair);
return -1;
}
/* Add this RDMA connection to the global list until a CONNECT capsule
* is received. */
TAILQ_INSERT_TAIL(&g_pending_conns, rqpair, pending_link);
rc = spdk_nvmf_rdma_qpair_allocate_channel(rqpair, rtransport);
if (rc) {
spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
spdk_nvmf_rdma_qpair_destroy(rqpair);
return -1;
}
cb_fn(&rqpair->qpair);
return 0;
}
@@ -738,7 +706,20 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
static void
nvmf_rdma_handle_disconnect(void *ctx)
{
struct spdk_nvmf_qpair *qpair = ctx;
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_rdma_qpair *rqpair;
rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
ctrlr = qpair->ctrlr;
if (ctrlr == NULL) {
/* No ctrlr has been established yet, so destroy
* the connection.
*/
spdk_nvmf_rdma_qpair_destroy(rqpair);
return;
}
spdk_nvmf_ctrlr_disconnect(qpair);
}
@@ -746,11 +727,8 @@ nvmf_rdma_handle_disconnect(void *ctx)
static int
nvmf_rdma_disconnect(struct rdma_cm_event *evt)
{
struct spdk_nvmf_qpair *qpair;
struct spdk_nvmf_ctrlr *ctrlr;
struct spdk_nvmf_rdma_qpair *rqpair;
struct spdk_nvmf_rdma_qpair *r, *t;
struct spdk_io_channel *ch;
struct spdk_nvmf_qpair *qpair;
struct spdk_io_channel *ch;
if (evt->id == NULL) {
SPDK_ERRLOG("disconnect request: missing cm_id\n");
@@ -765,29 +743,6 @@ nvmf_rdma_disconnect(struct rdma_cm_event *evt)
/* ack the disconnect event before rdma_destroy_id */
rdma_ack_cm_event(evt);
rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
/* The connection may still be in this pending list when a disconnect
* event arrives. Search for it and remove it if it is found.
*/
TAILQ_FOREACH_SAFE(r, &g_pending_conns, pending_link, t) {
if (r == rqpair) {
SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Received disconnect for qpair %p before first SEND ack\n",
rqpair);
TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
break;
}
}
ctrlr = qpair->ctrlr;
if (ctrlr == NULL) {
/* No ctrlr has been established yet, so destroy
* the connection immediately.
*/
spdk_nvmf_rdma_qpair_destroy(rqpair);
return 0;
}
ch = spdk_io_channel_from_ctx(qpair->group);
spdk_thread_send_msg(spdk_io_channel_get_thread(ch), nvmf_rdma_handle_disconnect, qpair);
@@ -1443,12 +1398,11 @@ spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
struct spdk_nvmf_rdma_qpair *rqpair);
static void
spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
{
struct spdk_nvmf_rdma_transport *rtransport;
struct rdma_cm_event *event;
int rc;
struct spdk_nvmf_rdma_qpair *rqpair, *tmp;
char buf[64];
rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
@@ -1457,22 +1411,6 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
return;
}
/* Process pending connections for incoming capsules. The only capsule
* this should ever find is a CONNECT request. */
TAILQ_FOREACH_SAFE(rqpair, &g_pending_conns, pending_link, tmp) {
rc = spdk_nvmf_rdma_qpair_poll(rtransport, rqpair);
if (rc < 0) {
TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
spdk_nvmf_rdma_qpair_destroy(rqpair);
} else if (rc > 0) {
spdk_put_io_channel(rqpair->mgmt_channel);
rqpair->mgmt_channel = NULL;
/* At least one request was processed which is assumed to be
* a CONNECT. Remove this connection from our list. */
TAILQ_REMOVE(&g_pending_conns, rqpair, pending_link);
}
}
while (1) {
rc = rdma_get_cm_event(rtransport->event_channel, &event);
if (rc == 0) {
@@ -1480,7 +1418,7 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
rc = nvmf_rdma_connect(transport, event);
rc = nvmf_rdma_connect(transport, event, cb_fn);
if (rc < 0) {
SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
break;
@@ -1604,11 +1542,14 @@ static int
spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
struct spdk_nvmf_qpair *qpair)
{
struct spdk_nvmf_rdma_transport *rtransport;
struct spdk_nvmf_rdma_poll_group *rgroup;
struct spdk_nvmf_rdma_qpair *rqpair;
struct spdk_nvmf_rdma_device *device;
struct spdk_nvmf_rdma_poller *poller;
int rc;
rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
@@ -1631,6 +1572,27 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
}
TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link);
rqpair->poller = poller;
spdk_nvmf_rdma_qpair_initialize(qpair);
rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
if (!rqpair->mgmt_channel) {
spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
spdk_nvmf_rdma_qpair_destroy(rqpair);
return -1;
}
rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
assert(rqpair->ch != NULL);
rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
if (rc) {
/* Try to reject, but we probably can't */
spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
spdk_nvmf_rdma_qpair_destroy(rqpair);
return -1;
}
return 0;
}
@@ -1774,15 +1736,6 @@ spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
bool error = false;
char buf[64];
/* reset the mgmt_channel and thread info of qpair */
if (rqpair->mgmt_channel != NULL) {
if (rqpair->thread != spdk_get_thread()) {
return 0;
}
} else if (spdk_nvmf_rdma_qpair_allocate_channel(rqpair, rtransport)) {
return -1;
}
/* Poll for completing operations. */
reaped = ibv_poll_cq(rqpair->cq, 32, wc);
if (reaped < 0) {

View File

@@ -69,9 +69,8 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
if ((cmd->opc == SPDK_NVME_OPC_FABRIC ||
req->qpair->type == QPAIR_TYPE_AQ) &&
req->qpair->group) {
if (cmd->opc == SPDK_NVME_OPC_FABRIC ||
req->qpair->type == QPAIR_TYPE_AQ) {
struct spdk_io_channel *ch;
ch = spdk_io_channel_from_ctx(req->qpair->group);

View File

@@ -104,9 +104,9 @@ spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
}
void
spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport)
spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
{
transport->ops->accept(transport);
transport->ops->accept(transport, cb_fn);
}
void

View File

@@ -78,7 +78,7 @@ struct spdk_nvmf_transport_ops {
/**
* Check for new connections on the transport.
*/
void (*accept)(struct spdk_nvmf_transport *transport);
void (*accept)(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn);
/**
* Fill out a discovery log entry for a specific listen address.
@@ -141,7 +141,7 @@ int spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
int spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
const struct spdk_nvme_transport_id *trid);
void spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport);
void spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn);
void spdk_nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
struct spdk_nvme_transport_id *trid,

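For transport implementers, the accept op now receives the callback and is expected to invoke it once per discovered qpair, as nvmf_rdma_connect() does above; per-qpair setup (channels, the actual accept) moves to the poll_group_add op on the group's thread. A sketch for a hypothetical transport, with my_* names standing in for transport-private code:

#include "spdk/nvmf.h"

/* Transport-private qpair wrapper (illustrative). */
struct my_qpair {
	struct spdk_nvmf_qpair qpair;
	/* ... transport-specific connection state ... */
};

/* Returns the next pending new connection, or NULL (illustrative). */
static struct my_qpair *my_poll_for_new_connection(struct spdk_nvmf_transport *transport);

/* The transport's accept op under the new signature. */
static void
my_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
{
	struct my_qpair *mqpair;

	while ((mqpair = my_poll_for_new_connection(transport)) != NULL) {
		/* Report the qpair; the user adds it to a poll group, and
		 * resource setup happens in poll_group_add on that
		 * group's thread. */
		cb_fn(&mqpair->qpair);
	}
}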
View File

@@ -152,10 +152,6 @@ int spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
return -1;
}
void spdk_bdev_close(struct spdk_bdev_desc *desc)
{
}
const char *spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem)
{
return NULL;