nvmf: Implement the poll group as an io_channel

A poll group has the same uniqueness properties as an io_channel, so
implement it as one to take advantage of the message-passing
infrastructure already available for io_channels.

Change-Id: I1777b91f0597a5a43ac0d0bbfdf878e874eb04f3
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/388291
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Ben Walker 2017-11-17 10:01:39 -07:00 committed by Jim Harris
parent 7b57e9f973
commit 3580546bd1
2 changed files with 125 additions and 46 deletions

View File

@ -61,6 +61,53 @@ spdk_nvmf_tgt_opts_init(struct spdk_nvmf_tgt_opts *opts)
opts->max_io_size = SPDK_NVMF_DEFAULT_MAX_IO_SIZE; opts->max_io_size = SPDK_NVMF_DEFAULT_MAX_IO_SIZE;
} }
/* Poller callback: drive one polling pass over every transport-specific
 * poll group owned by this spdk_nvmf_poll_group. */
static void
spdk_nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	struct spdk_nvmf_transport_poll_group *transport_group;

	TAILQ_FOREACH(transport_group, &group->tgroups, link) {
		/* A negative return signals a transport-level failure;
		 * abandon the remaining groups for this pass. */
		if (spdk_nvmf_transport_poll_group_poll(transport_group) < 0) {
			return;
		}
	}
}
static int
spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
struct spdk_nvmf_tgt *tgt = io_device;
struct spdk_nvmf_poll_group *group = ctx_buf;
struct spdk_nvmf_transport *transport;
TAILQ_INIT(&group->tgroups);
TAILQ_FOREACH(transport, &tgt->transports, link) {
spdk_nvmf_poll_group_add_transport(group, transport);
}
group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);
return 0;
}
static void
spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
struct spdk_nvmf_poll_group *group = ctx_buf;
struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
spdk_poller_unregister(&group->poller);
TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
TAILQ_REMOVE(&group->tgroups, tgroup, link);
spdk_nvmf_transport_poll_group_destroy(tgroup);
}
}
struct spdk_nvmf_tgt * struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts) spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
{ {
@ -91,6 +138,11 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
tgt->max_sid = 0; tgt->max_sid = 0;
TAILQ_INIT(&tgt->transports); TAILQ_INIT(&tgt->transports);
spdk_io_device_register(tgt,
spdk_nvmf_tgt_create_poll_group,
spdk_nvmf_tgt_destroy_poll_group,
sizeof(struct spdk_nvmf_poll_group));
SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Pairs Per Controller: %d\n", SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Pairs Per Controller: %d\n",
tgt->opts.max_qpairs_per_ctrlr); tgt->opts.max_qpairs_per_ctrlr);
SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", tgt->opts.max_queue_depth); SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", tgt->opts.max_queue_depth);
@ -122,6 +174,30 @@ spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt)
free(tgt); free(tgt);
} }
/* Context for propagating a newly created transport to every poll group
 * via spdk_for_each_channel() in spdk_nvmf_tgt_listen(); freed by the
 * completion callback spdk_nvmf_tgt_listen_done(). */
struct spdk_nvmf_tgt_listen_ctx {
struct spdk_nvmf_transport *transport; /* transport to add to each poll group */
struct spdk_nvme_transport_id trid; /* copy of the listen address (currently unused by the callbacks) */
};
/* Completion callback for the spdk_for_each_channel() issued from
 * spdk_nvmf_tgt_listen(): releases the spdk_nvmf_tgt_listen_ctx allocated
 * there.
 * NOTE(review): status is ignored, so a failure to add the transport to
 * some poll group is never reported back — consistent with the TODO in
 * spdk_nvmf_tgt_listen(), but worth confirming. */
static void
spdk_nvmf_tgt_listen_done(void *io_device, void *c, int status)
{
free(c);
}
/* Per-channel callback for spdk_for_each_channel(): attach the newly
 * created transport to the poll group living in this channel's context.
 * Returns the result of spdk_nvmf_poll_group_add_transport(). */
static int
spdk_nvmf_tgt_listen_add_transport(void *io_device,
				   struct spdk_io_channel *ch,
				   void *c)
{
	struct spdk_nvmf_tgt_listen_ctx *listen_ctx = c;
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);

	return spdk_nvmf_poll_group_add_transport(group, listen_ctx->transport);
}
int int
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
struct spdk_nvme_transport_id *trid) struct spdk_nvme_transport_id *trid)
@ -131,19 +207,39 @@ spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype); transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
if (!transport) { if (!transport) {
struct spdk_nvmf_tgt_listen_ctx *ctx;
transport = spdk_nvmf_transport_create(tgt, trid->trtype); transport = spdk_nvmf_transport_create(tgt, trid->trtype);
if (!transport) { if (!transport) {
SPDK_ERRLOG("Transport initialization failed\n"); SPDK_ERRLOG("Transport initialization failed\n");
return -EINVAL; return -EINVAL;
} }
TAILQ_INSERT_TAIL(&tgt->transports, transport, link); TAILQ_INSERT_TAIL(&tgt->transports, transport, link);
}
ctx = calloc(1, sizeof(*ctx));
if (!ctx) {
return -ENOMEM;
}
ctx->trid = *trid;
ctx->transport = transport;
/* Send a message to each poll group to notify it that a new transport
* is available.
* TODO: This call does not currently allow the user to wait for these
* messages to propagate. It also does not protect against two calls
* to this function overlapping
*/
spdk_for_each_channel(tgt,
spdk_nvmf_tgt_listen_add_transport,
ctx,
spdk_nvmf_tgt_listen_done);
}
rc = spdk_nvmf_transport_listen(transport, trid); rc = spdk_nvmf_transport_listen(transport, trid);
if (rc < 0) { if (rc < 0) {
SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
return -EINVAL; return rc;
} }
tgt->discovery_genctr++; tgt->discovery_genctr++;
@ -199,63 +295,27 @@ spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt)
} }
} }
/* (Removed-side copy from the diff: this is the pre-move definition being
 * deleted; the function now lives earlier in the file.)
 * Poller entry point: polls every transport poll group owned by this
 * spdk_nvmf_poll_group. */
static void
spdk_nvmf_poll_group_poll(void *ctx)
{
struct spdk_nvmf_poll_group *group = ctx;
int rc;
struct spdk_nvmf_transport_poll_group *tgroup;
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
rc = spdk_nvmf_transport_poll_group_poll(tgroup);
/* rc < 0 signals a transport failure; abort this polling pass early. */
if (rc < 0) {
return;
}
}
}
struct spdk_nvmf_poll_group * struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{ {
struct spdk_nvmf_poll_group *group; struct spdk_io_channel *ch;
struct spdk_nvmf_transport *transport;
struct spdk_nvmf_transport_poll_group *tgroup;
group = calloc(1, sizeof(*group)); ch = spdk_get_io_channel(tgt);
if (!group) { if (!ch) {
SPDK_ERRLOG("Unable to get I/O channel for target\n");
return NULL; return NULL;
} }
TAILQ_INIT(&group->tgroups); return spdk_io_channel_get_ctx(ch);
TAILQ_FOREACH(transport, &tgt->transports, link) {
tgroup = spdk_nvmf_transport_poll_group_create(transport);
if (!tgroup) {
SPDK_ERRLOG("Unable to create poll group for transport\n");
continue;
}
TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
}
group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);
return group;
} }
void void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group) spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{ {
struct spdk_nvmf_transport_poll_group *tgroup, *tmp; struct spdk_io_channel *ch;
spdk_poller_unregister(&group->poller); ch = spdk_io_channel_from_ctx(group);
spdk_put_io_channel(ch);
TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
TAILQ_REMOVE(&group->tgroups, tgroup, link);
spdk_nvmf_transport_poll_group_destroy(tgroup);
}
free(group);
} }
int int
@ -292,6 +352,23 @@ spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
return rc; return rc;
} }
/* Create a transport-specific poll group for the given transport and link
 * it into this poll group's list.
 * Returns 0 on success, -1 if the transport poll group could not be
 * created (an error is logged). */
int
spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *transport_group =
		spdk_nvmf_transport_poll_group_create(transport);

	if (transport_group == NULL) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -1;
	}

	TAILQ_INSERT_TAIL(&group->tgroups, transport_group, link);
	return 0;
}
SPDK_TRACE_REGISTER_FN(nvmf_trace) SPDK_TRACE_REGISTER_FN(nvmf_trace)
{ {
spdk_trace_register_object(OBJECT_NVMF_IO, 'r'); spdk_trace_register_object(OBJECT_NVMF_IO, 'r');

View File

@ -205,6 +205,8 @@ int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair); struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group, int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_qpair *qpair); struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_transport *transport);
void spdk_nvmf_request_exec(struct spdk_nvmf_request *req); void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req); int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);