From e28605f47ab0788de4a88a9401d6b4650db5d06b Mon Sep 17 00:00:00 2001
From: Seth Howell
Date: Mon, 14 Jan 2019 12:55:06 -0700
Subject: [PATCH] nvmf/transport: move buffer_pool to generic struct.

This is shared between all currently valid transports. Just move it up
to the generic structure. This will make implementing more shared
features on top of this a lot easier.

Change-Id: Ia896edcb7555903ba97adf862bc8d44228df2d36
Signed-off-by: Seth Howell
Reviewed-on: https://review.gerrithub.io/c/440416
Tested-by: SPDK CI Jenkins
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Ben Walker
Reviewed-by: Jim Harris
---
 lib/nvmf/nvmf_internal.h          |  7 ++++++
 lib/nvmf/rdma.c                   | 36 +++----------------
 lib/nvmf/tcp.c                    | 34 +++--------------------
 lib/nvmf/transport.c              | 33 ++++++++++++++++++++++++++++
 lib/nvmf/transport.h              |  3 +++
 test/unit/lib/nvmf/tcp.c/tcp_ut.c |  4 ++--
 6 files changed, 51 insertions(+), 66 deletions(-)

diff --git a/lib/nvmf/nvmf_internal.h b/lib/nvmf/nvmf_internal.h
index a44e4e4e6..54b3fc028 100644
--- a/lib/nvmf/nvmf_internal.h
+++ b/lib/nvmf/nvmf_internal.h
@@ -47,6 +47,13 @@
 
 #define SPDK_NVMF_MAX_SGL_ENTRIES 16
 
+/* AIO backend requires block size aligned data buffers,
+ * extra 4KiB aligned data buffer should work for most devices.
+ */
+#define SHIFT_4KB 12u
+#define NVMF_DATA_BUFFER_ALIGNMENT (1u << SHIFT_4KB)
+#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
+
 enum spdk_nvmf_subsystem_state {
 	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
 	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index e677151f1..89e086e28 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -61,13 +61,6 @@
 /* The RDMA completion queue size */
 #define NVMF_RDMA_CQ_SIZE 4096
 
-/* AIO backend requires block size aligned data buffers,
- * extra 4KiB aligned data buffer should work for most devices.
- */
-#define SHIFT_4KB 12
-#define NVMF_DATA_BUFFER_ALIGNMENT (1 << SHIFT_4KB)
-#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1)
-
 enum spdk_nvmf_rdma_request_state {
 	/* The request is not currently in use */
 	RDMA_REQUEST_STATE_FREE = 0,
@@ -371,8 +364,6 @@ struct spdk_nvmf_rdma_transport {
 
 	struct rdma_event_channel *event_channel;
 
-	struct spdk_mempool *data_buf_pool;
-
 	pthread_mutex_t lock;
 
 	/* fields used to poll RDMA/IB events */
@@ -1199,7 +1190,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	rdma_req->req.iovcnt = 0;
 
 	while (length) {
-		buf = spdk_mempool_get(rtransport->data_buf_pool);
+		buf = spdk_mempool_get(rtransport->transport.data_buf_pool);
 		if (!buf) {
 			rc = -ENOMEM;
 			goto err_exit;
@@ -1232,7 +1223,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 err_exit:
 	while (i) {
 		i--;
-		spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
+		spdk_mempool_put(rtransport->transport.data_buf_pool, rdma_req->data.buffers[i]);
 		rdma_req->req.iov[i].iov_base = NULL;
 		rdma_req->req.iov[i].iov_len = 0;
 
@@ -1343,7 +1334,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
 	if (rdma_req->data_from_pool) {
 		/* Put the buffer/s back in the pool */
 		for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
-			spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
+			spdk_mempool_put(rtransport->transport.data_buf_pool, rdma_req->data.buffers[i]);
 			rdma_req->req.iov[i].iov_base = NULL;
 			rdma_req->data.buffers[i] = NULL;
 		}
@@ -1693,17 +1684,6 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
 
-	rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
-			opts->num_shared_buffers,
-			opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
-			SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-			SPDK_ENV_SOCKET_ID_ANY);
-	if (!rtransport->data_buf_pool) {
-		SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
-		spdk_nvmf_rdma_destroy(&rtransport->transport);
-		return NULL;
-	}
-
 	contexts = rdma_get_devices(NULL);
 	if (contexts == NULL) {
 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
@@ -1842,16 +1822,6 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 		free(device);
 	}
 
-	if (rtransport->data_buf_pool != NULL) {
-		if (spdk_mempool_count(rtransport->data_buf_pool) !=
-		    transport->opts.num_shared_buffers) {
-			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
-				    spdk_mempool_count(rtransport->data_buf_pool),
-				    transport->opts.num_shared_buffers);
-		}
-	}
-
-	spdk_mempool_free(rtransport->data_buf_pool);
 	spdk_io_device_unregister(rtransport, NULL);
 	pthread_mutex_destroy(&rtransport->lock);
 	free(rtransport);
diff --git a/lib/nvmf/tcp.c b/lib/nvmf/tcp.c
index 570415882..e256b125a 100644
--- a/lib/nvmf/tcp.c
+++ b/lib/nvmf/tcp.c
@@ -49,13 +49,6 @@
 #include "spdk_internal/log.h"
 #include "spdk_internal/nvme_tcp.h"
 
-/*
- * AIO backend requires block size aligned data buffers,
- * extra 4KiB aligned data buffer should work for most devices.
- */
-#define SHIFT_4KB 12u
-#define NVMF_DATA_BUFFER_ALIGNMENT (1u << SHIFT_4KB)
-#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
 
 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
 #define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE 131072
@@ -298,8 +291,6 @@ struct spdk_nvmf_tcp_transport {
 
 	pthread_mutex_t lock;
 
-	struct spdk_mempool *data_buf_pool;
-
 	TAILQ_HEAD(, spdk_nvmf_tcp_port) ports;
 };
 
@@ -535,13 +526,6 @@ spdk_nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
 	assert(transport != NULL);
 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
 
-	if (spdk_mempool_count(ttransport->data_buf_pool) != (transport->opts.num_shared_buffers)) {
-		SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
-			    spdk_mempool_count(ttransport->data_buf_pool),
-			    transport->opts.num_shared_buffers);
-	}
-
-	spdk_mempool_free(ttransport->data_buf_pool);
 	spdk_io_device_unregister(ttransport, NULL);
 	pthread_mutex_destroy(&ttransport->lock);
 	free(ttransport);
@@ -591,18 +575,6 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
 
-	ttransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_tcp_data",
-			opts->num_shared_buffers,
-			opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
-			SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-			SPDK_ENV_SOCKET_ID_ANY);
-
-	if (!ttransport->data_buf_pool) {
-		SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
-		free(ttransport);
-		return NULL;
-	}
-
 	min_shared_buffers = spdk_thread_get_count() * opts->buf_cache_size;
 	if (min_shared_buffers > opts->num_shared_buffers) {
 		SPDK_ERRLOG("There are not enough buffers to satisfy"
@@ -2164,7 +2136,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 	tcp_req->req.iovcnt = 0;
 
 	while (length) {
-		buf = spdk_mempool_get(ttransport->data_buf_pool);
+		buf = spdk_mempool_get(ttransport->transport.data_buf_pool);
 		if (!buf) {
 			goto nomem;
 		}
@@ -2185,7 +2157,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 nomem:
 	while (i) {
 		i--;
-		spdk_mempool_put(ttransport->data_buf_pool, tcp_req->buffers[i]);
+		spdk_mempool_put(ttransport->transport.data_buf_pool, tcp_req->buffers[i]);
 		tcp_req->req.iov[i].iov_base = NULL;
 		tcp_req->req.iov[i].iov_len = 0;
 
@@ -2596,7 +2568,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		if (tcp_req->data_from_pool) {
 			/* Put the buffer/s back in the pool */
 			for (uint32_t i = 0; i < tcp_req->req.iovcnt; i++) {
-				spdk_mempool_put(ttransport->data_buf_pool, tcp_req->buffers[i]);
+				spdk_mempool_put(ttransport->transport.data_buf_pool, tcp_req->buffers[i]);
 				tcp_req->req.iov[i].iov_base = NULL;
 				tcp_req->buffers[i] = NULL;
 			}
diff --git a/lib/nvmf/transport.c b/lib/nvmf/transport.c
index 263130194..00e970cb2 100644
--- a/lib/nvmf/transport.c
+++ b/lib/nvmf/transport.c
@@ -50,6 +50,7 @@ static const struct spdk_nvmf_transport_ops *const g_transport_ops[] = {
 };
 
 #define NUM_TRANSPORTS (SPDK_COUNTOF(g_transport_ops))
+#define MAX_MEMPOOL_NAME_LENGTH 40
 
 static inline const struct spdk_nvmf_transport_ops *
 spdk_nvmf_get_transport_ops(enum spdk_nvme_transport_type type)
@@ -81,6 +82,8 @@ spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
 {
 	const struct spdk_nvmf_transport_ops *ops = NULL;
 	struct spdk_nvmf_transport *transport;
+	char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
+	int chars_written;
 
 	if ((opts->max_io_size % opts->io_unit_size != 0) ||
 	    (opts->max_io_size / opts->io_unit_size >
@@ -109,6 +112,25 @@ spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
 
 	transport->ops = ops;
 	transport->opts = *opts;
+	chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
+				 spdk_nvme_transport_id_trtype_str(type), "data");
+	if (chars_written < 0) {
+		SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
+		ops->destroy(transport);
+		return NULL;
+	}
+
+	transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name,
+				   opts->num_shared_buffers,
+				   opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
+				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+				   SPDK_ENV_SOCKET_ID_ANY);
+
+	if (!transport->data_buf_pool) {
+		SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
+		ops->destroy(transport);
+		return NULL;
+	}
 
 	return transport;
 }
@@ -128,6 +150,17 @@ spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
 int
 spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
 {
+	if (transport->data_buf_pool != NULL) {
+		if (spdk_mempool_count(transport->data_buf_pool) !=
+		    transport->opts.num_shared_buffers) {
+			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
+				    spdk_mempool_count(transport->data_buf_pool),
+				    transport->opts.num_shared_buffers);
+		}
+	}
+
+	spdk_mempool_free(transport->data_buf_pool);
+
 	return transport->ops->destroy(transport);
 }
 
diff --git a/lib/nvmf/transport.h b/lib/nvmf/transport.h
index 027909634..dca15231b 100644
--- a/lib/nvmf/transport.h
+++ b/lib/nvmf/transport.h
@@ -44,6 +44,9 @@ struct spdk_nvmf_transport {
 	const struct spdk_nvmf_transport_ops *ops;
 	struct spdk_nvmf_transport_opts opts;
 
+	/* A mempool for transport related data transfers */
+	struct spdk_mempool *data_buf_pool;
+
 	TAILQ_ENTRY(spdk_nvmf_transport) link;
 };
 
diff --git a/test/unit/lib/nvmf/tcp.c/tcp_ut.c b/test/unit/lib/nvmf/tcp.c/tcp_ut.c
index fbc345088..62e4c92b0 100644
--- a/test/unit/lib/nvmf/tcp.c/tcp_ut.c
+++ b/test/unit/lib/nvmf/tcp.c/tcp_ut.c
@@ -227,7 +227,7 @@ test_nvmf_tcp_create(void)
 	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
 	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
 	/* destroy transport */
-	spdk_mempool_free(ttransport->data_buf_pool);
+	spdk_mempool_free(ttransport->transport.data_buf_pool);
 	spdk_io_device_unregister(ttransport, NULL);
 	free(ttransport);
 
@@ -250,7 +250,7 @@ test_nvmf_tcp_create(void)
 	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
 	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
 	/* destroy transport */
-	spdk_mempool_free(ttransport->data_buf_pool);
+	spdk_mempool_free(ttransport->transport.data_buf_pool);
 	spdk_io_device_unregister(ttransport, NULL);
 	free(ttransport);
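
Note (illustration only, not part of the patch): the change works because every transport already embeds the generic struct spdk_nvmf_transport as a member, so once data_buf_pool lives in the generic struct, transport code reaches it through that embedded member (rtransport->transport.data_buf_pool, ttransport->transport.data_buf_pool) instead of a per-transport copy. The sketch below is a minimal, self-contained C illustration of that embed-and-containerof layout; nvmf_containerof, struct nvmf_transport, struct tcp_transport, and tcp_show are toy stand-ins rather than SPDK APIs, and the pool is modeled as an opaque pointer rather than a real spdk_mempool.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for SPDK_CONTAINEROF(): recover the enclosing struct from a member pointer. */
#define nvmf_containerof(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic transport state: members shared by every transport live here. */
struct nvmf_transport {
	const char *name;
	void *data_buf_pool;	/* shared data buffer pool, modeled as an opaque pointer */
};

/* Transport-specific state embeds the generic struct, as spdk_nvmf_tcp_transport does. */
struct tcp_transport {
	struct nvmf_transport transport;	/* shared fields are reached through this */
	int listen_port;
};

/* Transport code receives the generic pointer and converts it back to its own type. */
static void tcp_show(struct nvmf_transport *transport)
{
	struct tcp_transport *ttransport =
		nvmf_containerof(transport, struct tcp_transport, transport);

	/* The pool is read from the embedded generic struct, not from a per-transport field. */
	printf("%s port %d pool %p\n", transport->name, ttransport->listen_port,
	       ttransport->transport.data_buf_pool);
}

int main(void)
{
	static char fake_pool[64];	/* placeholder for the real mempool */
	struct tcp_transport ttransport = {
		.transport = { .name = "tcp", .data_buf_pool = fake_pool },
		.listen_port = 4420,
	};

	tcp_show(&ttransport.transport);
	return 0;
}

Because the pool is created and checked once in the generic spdk_nvmf_transport_create()/destroy() path, any new transport gets the shared buffer pool and its leak accounting for free.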