nvmf/transport: move buffer_pool to generic struct.

The data buffer pool is shared by all of the existing transports, so move
it up into the generic transport structure. This will make it much easier
to implement additional shared features on top of it.

Change-Id: Ia896edcb7555903ba97adf862bc8d44228df2d36
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/440416
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Authored by Seth Howell on 2019-01-14 12:55:06 -07:00; committed by Jim Harris
parent cd4dd43ab8
commit e28605f47a
6 changed files with 51 additions and 66 deletions

View File

@@ -47,6 +47,13 @@
#define SPDK_NVMF_MAX_SGL_ENTRIES 16
/* AIO backend requires block size aligned data buffers,
* extra 4KiB aligned data buffer should work for most devices.
*/
#define SHIFT_4KB 12u
#define NVMF_DATA_BUFFER_ALIGNMENT (1u << SHIFT_4KB)
#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
enum spdk_nvmf_subsystem_state {
SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
SPDK_NVMF_SUBSYSTEM_ACTIVATING,
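For orientation, here is a minimal sketch (not part of the diff) of how these alignment macros are typically applied: every element taken from the shared pool is rounded up to the next 4KiB boundary before being handed to the backend. The pool elements are over-allocated by NVMF_DATA_BUFFER_ALIGNMENT bytes (see the spdk_mempool_create() call further down), so the aligned pointer still leaves the full buffer size usable. The helper name nvmf_align_buffer() is hypothetical; the macros mirror the ones added above.

    #include <stdint.h>

    #define SHIFT_4KB 12u
    #define NVMF_DATA_BUFFER_ALIGNMENT (1u << SHIFT_4KB)
    #define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)

    /* Round a raw pool element up to the next 4KiB boundary. */
    static inline void *
    nvmf_align_buffer(void *raw_buf)
    {
        return (void *)(((uintptr_t)raw_buf + NVMF_DATA_BUFFER_MASK) &
                        ~(uintptr_t)NVMF_DATA_BUFFER_MASK);
    }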

View File

@@ -61,13 +61,6 @@
/* The RDMA completion queue size */
#define NVMF_RDMA_CQ_SIZE 4096
/* AIO backend requires block size aligned data buffers,
* extra 4KiB aligned data buffer should work for most devices.
*/
#define SHIFT_4KB 12
#define NVMF_DATA_BUFFER_ALIGNMENT (1 << SHIFT_4KB)
#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1)
enum spdk_nvmf_rdma_request_state {
/* The request is not currently in use */
RDMA_REQUEST_STATE_FREE = 0,
@@ -371,8 +364,6 @@ struct spdk_nvmf_rdma_transport {
struct rdma_event_channel *event_channel;
struct spdk_mempool *data_buf_pool;
pthread_mutex_t lock;
/* fields used to poll RDMA/IB events */
@@ -1199,7 +1190,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
rdma_req->req.iovcnt = 0;
while (length) {
buf = spdk_mempool_get(rtransport->data_buf_pool);
buf = spdk_mempool_get(rtransport->transport.data_buf_pool);
if (!buf) {
rc = -ENOMEM;
goto err_exit;
@@ -1232,7 +1223,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
err_exit:
while (i) {
i--;
spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
spdk_mempool_put(rtransport->transport.data_buf_pool, rdma_req->data.buffers[i]);
rdma_req->req.iov[i].iov_base = NULL;
rdma_req->req.iov[i].iov_len = 0;
@@ -1343,7 +1334,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
if (rdma_req->data_from_pool) {
/* Put the buffer/s back in the pool */
for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
spdk_mempool_put(rtransport->transport.data_buf_pool, rdma_req->data.buffers[i]);
rdma_req->req.iov[i].iov_base = NULL;
rdma_req->data.buffers[i] = NULL;
}
@@ -1693,17 +1684,6 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}
rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
opts->num_shared_buffers,
opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
SPDK_ENV_SOCKET_ID_ANY);
if (!rtransport->data_buf_pool) {
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
spdk_nvmf_rdma_destroy(&rtransport->transport);
return NULL;
}
contexts = rdma_get_devices(NULL);
if (contexts == NULL) {
SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
@@ -1842,16 +1822,6 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
free(device);
}
if (rtransport->data_buf_pool != NULL) {
if (spdk_mempool_count(rtransport->data_buf_pool) !=
transport->opts.num_shared_buffers) {
SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
spdk_mempool_count(rtransport->data_buf_pool),
transport->opts.num_shared_buffers);
}
}
spdk_mempool_free(rtransport->data_buf_pool);
spdk_io_device_unregister(rtransport, NULL);
pthread_mutex_destroy(&rtransport->lock);
free(rtransport);
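To make the new access pattern concrete, below is a simplified, illustrative sketch (not the actual SPDK fill_iovs functions) of the buffer-fill loop both transports now run against the shared transport->data_buf_pool. It assumes the struct spdk_nvmf_transport definition shown further down and the NVMF_DATA_BUFFER_MASK macro moved into the common header above; fill_iovs_from_shared_pool() and its parameters are hypothetical.

    #include <errno.h>
    #include <stdint.h>
    #include <sys/uio.h>

    #include "spdk/env.h"
    #include "spdk/util.h"

    static int
    fill_iovs_from_shared_pool(struct spdk_nvmf_transport *transport,
                               struct iovec *iov, void **buffers,
                               uint32_t max_iovs, uint32_t length,
                               uint32_t io_unit_size)
    {
        uint32_t i = 0;

        while (length && i < max_iovs) {
            void *buf = spdk_mempool_get(transport->data_buf_pool);

            if (!buf) {
                /* Roll back: return every buffer taken so far to the pool. */
                while (i) {
                    i--;
                    spdk_mempool_put(transport->data_buf_pool, buffers[i]);
                    iov[i].iov_base = NULL;
                    iov[i].iov_len = 0;
                }
                return -ENOMEM;
            }

            buffers[i] = buf;
            /* Align into the over-allocated element, as the real transports do. */
            iov[i].iov_base = (void *)(((uintptr_t)buf + NVMF_DATA_BUFFER_MASK) &
                                       ~(uintptr_t)NVMF_DATA_BUFFER_MASK);
            iov[i].iov_len = spdk_min(length, io_unit_size);
            length -= iov[i].iov_len;
            i++;
        }

        return length ? -EINVAL : 0;
    }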

View File

@@ -49,13 +49,6 @@
#include "spdk_internal/log.h"
#include "spdk_internal/nvme_tcp.h"
/*
* AIO backend requires block size aligned data buffers,
* extra 4KiB aligned data buffer should work for most devices.
*/
#define SHIFT_4KB 12u
#define NVMF_DATA_BUFFER_ALIGNMENT (1u << SHIFT_4KB)
#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)
#define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE 131072
@@ -298,8 +291,6 @@ struct spdk_nvmf_tcp_transport {
pthread_mutex_t lock;
struct spdk_mempool *data_buf_pool;
TAILQ_HEAD(, spdk_nvmf_tcp_port) ports;
};
@@ -535,13 +526,6 @@ spdk_nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
assert(transport != NULL);
ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
if (spdk_mempool_count(ttransport->data_buf_pool) != (transport->opts.num_shared_buffers)) {
SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
spdk_mempool_count(ttransport->data_buf_pool),
transport->opts.num_shared_buffers);
}
spdk_mempool_free(ttransport->data_buf_pool);
spdk_io_device_unregister(ttransport, NULL);
pthread_mutex_destroy(&ttransport->lock);
free(ttransport);
@@ -591,18 +575,6 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}
ttransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_tcp_data",
opts->num_shared_buffers,
opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
SPDK_ENV_SOCKET_ID_ANY);
if (!ttransport->data_buf_pool) {
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
free(ttransport);
return NULL;
}
min_shared_buffers = spdk_thread_get_count() * opts->buf_cache_size;
if (min_shared_buffers > opts->num_shared_buffers) {
SPDK_ERRLOG("There are not enough buffers to satisfy"
@@ -2164,7 +2136,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
tcp_req->req.iovcnt = 0;
while (length) {
buf = spdk_mempool_get(ttransport->data_buf_pool);
buf = spdk_mempool_get(ttransport->transport.data_buf_pool);
if (!buf) {
goto nomem;
}
@@ -2185,7 +2157,7 @@ spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
nomem:
while (i) {
i--;
spdk_mempool_put(ttransport->data_buf_pool, tcp_req->buffers[i]);
spdk_mempool_put(ttransport->transport.data_buf_pool, tcp_req->buffers[i]);
tcp_req->req.iov[i].iov_base = NULL;
tcp_req->req.iov[i].iov_len = 0;
@@ -2596,7 +2568,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
if (tcp_req->data_from_pool) {
/* Put the buffer/s back in the pool */
for (uint32_t i = 0; i < tcp_req->req.iovcnt; i++) {
spdk_mempool_put(ttransport->data_buf_pool, tcp_req->buffers[i]);
spdk_mempool_put(ttransport->transport.data_buf_pool, tcp_req->buffers[i]);
tcp_req->req.iov[i].iov_base = NULL;
tcp_req->buffers[i] = NULL;
}
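The completion path becomes symmetric as well: both transports now return request buffers to the same shared pool. A minimal illustrative sketch, assuming the generic struct spdk_nvmf_transport shown further down; release_request_buffers() and its arguments are hypothetical stand-ins for the per-request arrays each transport keeps.

    #include <stdint.h>
    #include <sys/uio.h>

    #include "spdk/env.h"

    /* Put every buffer owned by a completed request back into the shared pool
     * and clear the request's iovec entries. */
    static void
    release_request_buffers(struct spdk_nvmf_transport *transport,
                            struct iovec *iov, void **buffers, uint32_t iovcnt)
    {
        for (uint32_t i = 0; i < iovcnt; i++) {
            spdk_mempool_put(transport->data_buf_pool, buffers[i]);
            iov[i].iov_base = NULL;
            iov[i].iov_len = 0;
            buffers[i] = NULL;
        }
    }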

View File

@@ -50,6 +50,7 @@ static const struct spdk_nvmf_transport_ops *const g_transport_ops[] = {
};
#define NUM_TRANSPORTS (SPDK_COUNTOF(g_transport_ops))
#define MAX_MEMPOOL_NAME_LENGTH 40
static inline const struct spdk_nvmf_transport_ops *
spdk_nvmf_get_transport_ops(enum spdk_nvme_transport_type type)
@@ -81,6 +82,8 @@ spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
{
const struct spdk_nvmf_transport_ops *ops = NULL;
struct spdk_nvmf_transport *transport;
char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
int chars_written;
if ((opts->max_io_size % opts->io_unit_size != 0) ||
(opts->max_io_size / opts->io_unit_size >
@@ -109,6 +112,25 @@ spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
transport->ops = ops;
transport->opts = *opts;
chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
spdk_nvme_transport_id_trtype_str(type), "data");
if (chars_written < 0) {
SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
ops->destroy(transport);
return NULL;
}
transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name,
opts->num_shared_buffers,
opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
SPDK_ENV_SOCKET_ID_ANY);
if (!transport->data_buf_pool) {
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
ops->destroy(transport);
return NULL;
}
return transport;
}
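A side note on the naming scheme above: assuming spdk_nvme_transport_id_trtype_str() returns the usual short strings ("RDMA", "TCP"), the generated pool names should come out as "spdk_nvmf_RDMA_data" and "spdk_nvmf_TCP_data", comfortably under the 40-byte MAX_MEMPOOL_NAME_LENGTH cap. A standalone sketch to illustrate (a hypothetical test program, not part of the change):

    #include <stdio.h>

    #include "spdk/nvme.h"

    #define MAX_MEMPOOL_NAME_LENGTH 40

    int
    main(void)
    {
        char name[MAX_MEMPOOL_NAME_LENGTH];

        /* Mirrors the snprintf() in spdk_nvmf_transport_create() above. */
        snprintf(name, sizeof(name), "%s_%s_%s", "spdk_nvmf",
                 spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA), "data");
        printf("%s\n", name);   /* expected: spdk_nvmf_RDMA_data */
        return 0;
    }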
@@ -128,6 +150,17 @@ spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
int
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
{
if (transport->data_buf_pool != NULL) {
if (spdk_mempool_count(transport->data_buf_pool) !=
transport->opts.num_shared_buffers) {
SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
spdk_mempool_count(transport->data_buf_pool),
transport->opts.num_shared_buffers);
}
}
spdk_mempool_free(transport->data_buf_pool);
return transport->ops->destroy(transport);
}
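One consequence worth spelling out: ownership of the pool moves to this generic layer, which performs the leak check and frees the pool before invoking the transport-specific destroy callback, so the individual transports no longer free a data_buf_pool of their own. A hedged sketch of what a transport's destroy op can look like after the change; struct my_transport and my_transport_destroy() are hypothetical.

    #include <stdlib.h>

    #include "spdk/util.h"

    struct my_transport {
        struct spdk_nvmf_transport  transport;  /* generic part; the pool lives here */
        /* transport-specific state ... */
    };

    static int
    my_transport_destroy(struct spdk_nvmf_transport *transport)
    {
        struct my_transport *mtransport =
            SPDK_CONTAINEROF(transport, struct my_transport, transport);

        /* No spdk_mempool_free() here any more: spdk_nvmf_transport_destroy()
         * has already released the shared data_buf_pool. */
        free(mtransport);
        return 0;
    }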

View File

@@ -44,6 +44,9 @@ struct spdk_nvmf_transport {
const struct spdk_nvmf_transport_ops *ops;
struct spdk_nvmf_transport_opts opts;
/* A mempool for transport related data transfers */
struct spdk_mempool *data_buf_pool;
TAILQ_ENTRY(spdk_nvmf_transport) link;
};
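With the field in the generic struct, a transport that embeds struct spdk_nvmf_transport (as the RDMA and TCP transports do) can reach the pool either through the embedded member or, given only a generic pointer, via SPDK_CONTAINEROF(). A small hedged sketch; struct demo_transport and the helper names are hypothetical.

    #include <pthread.h>

    #include "spdk/env.h"
    #include "spdk/util.h"

    struct demo_transport {
        struct spdk_nvmf_transport  transport;  /* embedded generic struct */
        pthread_mutex_t             lock;       /* transport-specific fields */
    };

    /* Access through the embedded member, as the RDMA and TCP transports now do ... */
    static void *
    demo_get_buf(struct demo_transport *dtransport)
    {
        return spdk_mempool_get(dtransport->transport.data_buf_pool);
    }

    /* ... or recover the specific transport from a generic pointer first. */
    static void *
    demo_get_buf_generic(struct spdk_nvmf_transport *transport)
    {
        struct demo_transport *dtransport =
            SPDK_CONTAINEROF(transport, struct demo_transport, transport);

        return spdk_mempool_get(dtransport->transport.data_buf_pool);
    }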

View File

@@ -227,7 +227,7 @@ test_nvmf_tcp_create(void)
CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
/* destroy transport */
spdk_mempool_free(ttransport->data_buf_pool);
spdk_mempool_free(ttransport->transport.data_buf_pool);
spdk_io_device_unregister(ttransport, NULL);
free(ttransport);
@@ -250,7 +250,7 @@ test_nvmf_tcp_create(void)
CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
/* destroy transport */
spdk_mempool_free(ttransport->data_buf_pool);
spdk_mempool_free(ttransport->transport.data_buf_pool);
spdk_io_device_unregister(ttransport, NULL);
free(ttransport);