From 8cb172f2a9219419b2a17a5674aacf7ff8d6e11e Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Mon, 14 Jan 2019 13:24:35 -0700
Subject: [PATCH] nvmf/transport: add per-pg cache

This is implemented at a generic level.

Change-Id: Ibf8167e828f8da27cc26cd04e611c3f3c084319a
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/c/440418
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Darek Stojaczyk
Reviewed-by: Ben Walker
---
 lib/nvmf/nvmf_internal.h | 11 +++++++++--
 lib/nvmf/transport.c     | 22 ++++++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/lib/nvmf/nvmf_internal.h b/lib/nvmf/nvmf_internal.h
index 54b3fc028..7aa298eef 100644
--- a/lib/nvmf/nvmf_internal.h
+++ b/lib/nvmf/nvmf_internal.h
@@ -101,9 +101,16 @@ struct spdk_nvmf_listener {
 	TAILQ_ENTRY(spdk_nvmf_listener) link;
 };
 
+struct spdk_nvmf_transport_pg_cache_buf {
+	STAILQ_ENTRY(spdk_nvmf_transport_pg_cache_buf) link;
+};
+
 struct spdk_nvmf_transport_poll_group {
-	struct spdk_nvmf_transport *transport;
-	TAILQ_ENTRY(spdk_nvmf_transport_poll_group) link;
+	struct spdk_nvmf_transport				*transport;
+	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)		buf_cache;
+	uint32_t						buf_cache_count;
+	uint32_t						buf_cache_size;
+	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)		link;
 };
 
 struct spdk_nvmf_subsystem_poll_group {
diff --git a/lib/nvmf/transport.c b/lib/nvmf/transport.c
index 00e970cb2..8535d8389 100644
--- a/lib/nvmf/transport.c
+++ b/lib/nvmf/transport.c
@@ -196,6 +196,7 @@ struct spdk_nvmf_transport_poll_group *
 spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
 {
 	struct spdk_nvmf_transport_poll_group *group;
+	struct spdk_nvmf_transport_pg_cache_buf *buf;
 
 	group = transport->ops->poll_group_create(transport);
 	if (!group) {
@@ -203,12 +204,33 @@ spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	group->transport = transport;
+	STAILQ_INIT(&group->buf_cache);
+
+	if (transport->opts.buf_cache_size) {
+		group->buf_cache_count = 0;
+		group->buf_cache_size = transport->opts.buf_cache_size;
+		while (group->buf_cache_count < group->buf_cache_size) {
+			buf = (struct spdk_nvmf_transport_pg_cache_buf *)spdk_mempool_get(transport->data_buf_pool);
+			if (!buf) {
+				SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
+				break;
+			}
+			STAILQ_INSERT_HEAD(&group->buf_cache, buf, link);
+			group->buf_cache_count++;
+		}
+	}
 
 	return group;
 }
 
 void
 spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 {
+	struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;
+
+	STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
+		STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
+		spdk_mempool_put(group->transport->data_buf_pool, buf);
+	}
 	group->transport->ops->poll_group_destroy(group);
 }
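
Note on usage: this patch only fills the cache at poll-group creation and drains
it at destruction. A transport is expected to consume it on the I/O path by
popping from buf_cache when a buffer is available and falling back to the
shared data_buf_pool otherwise, then returning buffers to the cache (up to
buf_cache_size) on completion. The sketch below illustrates that pattern under
those assumptions; the helpers nvmf_pg_get_buf()/nvmf_pg_put_buf() are
hypothetical names for illustration, not part of this patch or the SPDK API.

/*
 * Sketch only: a hypothetical transport-side consumer of the per-pg cache.
 * Assumes the definitions added by this patch plus spdk/env.h
 * (spdk_mempool_get/put) and spdk/queue.h (STAILQ_* macros).
 */
#include "spdk/env.h"
#include "spdk/queue.h"
#include "nvmf_internal.h"

static void *
nvmf_pg_get_buf(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf;

	if (!STAILQ_EMPTY(&group->buf_cache)) {
		/* Hot path: take a reserved buffer from this group's cache;
		 * no locking needed since the group is single-threaded. */
		buf = STAILQ_FIRST(&group->buf_cache);
		STAILQ_REMOVE_HEAD(&group->buf_cache, link);
		group->buf_cache_count--;
		return buf;
	}

	/* Cache empty: fall back to the shared pool; may return NULL. */
	return spdk_mempool_get(group->transport->data_buf_pool);
}

static void
nvmf_pg_put_buf(struct spdk_nvmf_transport_poll_group *group, void *data)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf = data;

	if (group->buf_cache_count < group->buf_cache_size) {
		/* Top the cache back up; the STAILQ link overlays the first
		 * bytes of the data buffer while it sits in the cache. */
		STAILQ_INSERT_HEAD(&group->buf_cache, buf, link);
		group->buf_cache_count++;
	} else {
		spdk_mempool_put(group->transport->data_buf_pool, data);
	}
}

Because buffers are reserved when the poll group is created, each group is
guaranteed up to buf_cache_size buffers even if other groups have drained the
shared pool, and the common case avoids touching the shared mempool entirely.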