rdma: adjust I/O unit based on device SGL support

For devices that support fewer SGE elements than our defaults assume, we
need to adjust the I/O unit size so that a single I/O never requires more
SGEs per request than the device allows.

Change-Id: I316d88459380f28009cc8a3d9357e9c67b08e871
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/442776
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Seth Howell 2019-01-30 13:32:01 -07:00 committed by Jim Harris
parent e7beb0d1fd
commit b6b0a0ba59
2 changed files with 19 additions and 6 deletions
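
The adjustment the patch makes is purely arithmetic: if the most restrictive device in the system advertises fewer SGEs per work request than SPDK_NVMF_MAX_SGL_ENTRIES, the default io_unit_size can no longer cover max_io_size in a single request, so it is bumped to max_io_size divided by the device SGE limit, rounded up to a 4 KiB multiple. A minimal standalone sketch of that computation follows; the macro values are stand-ins mirroring the SPDK defaults rather than the macros from the tree.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SPDK macros referenced by the patch. */
#define MAX_IO_SIZE            131072u                        /* SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE */
#define MAX_SGL_ENTRIES        16u                            /* SPDK_NVMF_MAX_SGL_ENTRIES */
#define MIN_IO_BUFFER_SIZE     (MAX_IO_SIZE / MAX_SGL_ENTRIES) /* 8192 */
#define DATA_BUFFER_ALIGNMENT  4096u
#define DATA_BUFFER_MASK       (DATA_BUFFER_ALIGNMENT - 1)

/* Return an io_unit_size large enough that max_io_size fits in at most
 * max_device_sge buffers, rounded up to a 4k multiple and never below
 * the transport minimum. */
static uint32_t
adjust_io_unit_size(uint32_t io_unit_size, uint32_t max_io_size, uint32_t max_device_sge)
{
        if (io_unit_size * max_device_sge >= max_io_size) {
                return io_unit_size; /* device SGE limit is already sufficient */
        }

        /* divide and round up */
        io_unit_size = (max_io_size + max_device_sge - 1) / max_device_sge;
        /* round up to the nearest 4k */
        io_unit_size = (io_unit_size + DATA_BUFFER_ALIGNMENT - 1) & ~DATA_BUFFER_MASK;

        return io_unit_size > MIN_IO_BUFFER_SIZE ? io_unit_size : MIN_IO_BUFFER_SIZE;
}

int
main(void)
{
        /* A device limited to 4 SGEs forces 32k buffers; 16 SGEs need no change. */
        assert(adjust_io_unit_size(MIN_IO_BUFFER_SIZE, MAX_IO_SIZE, 4) == 32768);
        assert(adjust_io_unit_size(MIN_IO_BUFFER_SIZE, MAX_IO_SIZE, 16) == 8192);
        printf("io_unit_size adjustment behaves as expected\n");
        return 0;
}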

lib/nvmf/rdma.c

@@ -1673,10 +1673,9 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 64
 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
-#define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE 4096
+#define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
 #define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 512
 #define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
-#define SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
 
 static void
 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
@@ -1685,8 +1684,7 @@ spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
         opts->max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
         opts->in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
         opts->max_io_size = SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
-        opts->io_unit_size = spdk_max(SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
-                                      SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
+        opts->io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;
         opts->max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
         opts->num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
         opts->buf_cache_size = SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
@@ -1705,6 +1703,7 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
         int flag;
         uint32_t sge_count;
         uint32_t min_shared_buffers;
+        int max_device_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
 
         rtransport = calloc(1, sizeof(*rtransport));
         if (!rtransport) {
@@ -1816,6 +1815,8 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
                 }
 
+                max_device_sge = spdk_min(max_device_sge, device->attr.max_sge);
+
 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
                 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
                         SPDK_WARNLOG("The libibverbs on this system supports SEND_WITH_INVALIDATE,");
@@ -1850,6 +1851,18 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
         }
         rdma_free_devices(contexts);
 
+        if (opts->io_unit_size * max_device_sge < opts->max_io_size) {
+                /* divide and round up. */
+                opts->io_unit_size = (opts->max_io_size + max_device_sge - 1) / max_device_sge;
+
+                /* round up to the nearest 4k. */
+                opts->io_unit_size = (opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT - 1) & ~NVMF_DATA_BUFFER_MASK;
+
+                opts->io_unit_size = spdk_max(opts->io_unit_size, SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
+                SPDK_NOTICELOG("Adjusting the io unit size to fit the device's maximum I/O size. New I/O unit size %u\n",
+                               opts->io_unit_size);
+        }
+
         if (rc < 0) {
                 spdk_nvmf_rdma_destroy(&rtransport->transport);
                 return NULL;
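
Because a target may sit on several RDMA devices, the new max_device_sge starts at SPDK_NVMF_MAX_SGL_ENTRIES and is clamped to the smallest attr.max_sge seen while walking the device contexts; only then is the io_unit_size check above applied. A rough standalone illustration of that scan using raw libibverbs is below; the patch itself reuses the contexts already opened through rdma_get_devices(), so this helper is purely hypothetical.

#include <infiniband/verbs.h>

/* Illustrative only: find the smallest max_sge across all local RDMA
 * devices, mirroring how the patch clamps max_device_sge per context. */
static int
min_device_sge(int transport_max_sge)
{
        struct ibv_device **list;
        int num_devices, i, min_sge = transport_max_sge;

        list = ibv_get_device_list(&num_devices);
        if (list == NULL) {
                return min_sge;
        }

        for (i = 0; i < num_devices; i++) {
                struct ibv_context *ctx = ibv_open_device(list[i]);
                struct ibv_device_attr attr;

                if (ctx == NULL) {
                        continue;
                }
                if (ibv_query_device(ctx, &attr) == 0 && attr.max_sge < min_sge) {
                        min_sge = attr.max_sge;
                }
                ibv_close_device(ctx);
        }

        ibv_free_device_list(list);
        return min_sge;
}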

test/unit/lib/nvmf/rdma.c/rdma_ut.c

@@ -45,8 +45,8 @@ struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
         .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
         .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
         .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
-        .max_io_size = (SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
-        .io_unit_size = SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE,
+        .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
+        .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
         .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
         .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
 };