nvmf/transport: add per-pg cache

Add a per-poll-group cache of data buffers. This is implemented at the generic transport level so that every transport gets it: each poll group reserves up to buf_cache_size buffers from the transport's shared data_buf_pool when it is created, and returns them to the pool when it is destroyed.

Change-Id: Ibf8167e828f8da27cc26cd04e611c3f3c084319a
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/440418
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Seth Howell <seth.howell@intel.com>
Date: 2019-01-14 13:24:35 -07:00
Committer: Ben Walker
parent cefabb56df
commit 8cb172f2a9
2 changed files with 31 additions and 2 deletions


@@ -101,8 +101,15 @@ struct spdk_nvmf_listener {
 	TAILQ_ENTRY(spdk_nvmf_listener)	link;
 };
 
+struct spdk_nvmf_transport_pg_cache_buf {
+	STAILQ_ENTRY(spdk_nvmf_transport_pg_cache_buf) link;
+};
+
 struct spdk_nvmf_transport_poll_group {
 	struct spdk_nvmf_transport			*transport;
+	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)	buf_cache;
+	uint32_t					buf_cache_count;
+	uint32_t					buf_cache_size;
 	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)	link;
 };
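
A note on the new struct: the cache does not allocate separate tracking objects. A data buffer taken from the transport's data_buf_pool is simply cast to struct spdk_nvmf_transport_pg_cache_buf, so the first bytes of the idle buffer itself hold the STAILQ link while it is parked in the cache. A minimal standalone sketch of that overlay pattern (illustrative names; malloc() stands in for spdk_mempool_get()):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Stand-in for one element of the transport's data buffer pool.
 * While a buffer is parked in the per-poll-group cache, its first
 * bytes are reused to hold the STAILQ link -- no extra allocation. */
struct pg_cache_buf {
	STAILQ_ENTRY(pg_cache_buf) link;
};

STAILQ_HEAD(buf_cache, pg_cache_buf);

int main(void)
{
	struct buf_cache cache = STAILQ_HEAD_INITIALIZER(cache);
	struct pg_cache_buf *buf;

	/* malloc() stands in for spdk_mempool_get() in this sketch. */
	void *data_buf = malloc(4096);

	/* Park the buffer in the cache: overlay the link on the buffer. */
	buf = (struct pg_cache_buf *)data_buf;
	STAILQ_INSERT_HEAD(&cache, buf, link);

	/* Take it back out; the full 4 KiB is usable again as I/O data. */
	buf = STAILQ_FIRST(&cache);
	STAILQ_REMOVE_HEAD(&cache, link);
	printf("got buffer %p back from the cache\n", (void *)buf);

	free(buf);
	return 0;
}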


@@ -196,6 +196,7 @@ struct spdk_nvmf_transport_poll_group *
 spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
 {
 	struct spdk_nvmf_transport_poll_group *group;
+	struct spdk_nvmf_transport_pg_cache_buf *buf;
 
 	group = transport->ops->poll_group_create(transport);
 	if (!group) {
@@ -203,12 +204,33 @@ spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	group->transport = transport;
+	STAILQ_INIT(&group->buf_cache);
+
+	if (transport->opts.buf_cache_size) {
+		group->buf_cache_count = 0;
+		group->buf_cache_size = transport->opts.buf_cache_size;
+		while (group->buf_cache_count < group->buf_cache_size) {
+			buf = (struct spdk_nvmf_transport_pg_cache_buf *)spdk_mempool_get(transport->data_buf_pool);
+			if (!buf) {
+				SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
+				break;
+			}
+			STAILQ_INSERT_HEAD(&group->buf_cache, buf, link);
+			group->buf_cache_count++;
+		}
+	}
 
 	return group;
 }
 
 void
 spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 {
+	struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;
+
+	STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
+		STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
+		spdk_mempool_put(group->transport->data_buf_pool, buf);
+	}
+
 	group->transport->ops->poll_group_destroy(group);
 }
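
For context, the create/destroy changes above only fill and drain the cache; the payoff comes when a transport's request path consumes buffers from it. A hedged sketch of the intended consumer pattern, using only the fields added in this commit (pg_get_buf/pg_put_buf are hypothetical helpers, not SPDK API):

/* Sketch only: assumes the fields added in this commit. */
static void *
pg_get_buf(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf;

	if (!STAILQ_EMPTY(&group->buf_cache)) {
		/* Fast path: hit in the thread-local cache. */
		buf = STAILQ_FIRST(&group->buf_cache);
		STAILQ_REMOVE_HEAD(&group->buf_cache, link);
		group->buf_cache_count--;
		return buf;
	}

	/* Slow path: fall back to the shared transport mempool. */
	return spdk_mempool_get(group->transport->data_buf_pool);
}

static void
pg_put_buf(struct spdk_nvmf_transport_poll_group *group, void *data_buf)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf = data_buf;

	if (group->buf_cache_count < group->buf_cache_size) {
		/* Refill the cache up to its configured size. */
		STAILQ_INSERT_HEAD(&group->buf_cache, buf, link);
		group->buf_cache_count++;
	} else {
		spdk_mempool_put(group->transport->data_buf_pool, data_buf);
	}
}

Because a poll group is serviced by a single thread, the STAILQ operations above need no locking; only the fallback path touches the shared mempool, which involves cross-core synchronization.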