diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 090e4e161..28d70560d 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -211,6 +211,13 @@ struct spdk_nvmf_rdma_recv {
 	TAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
 };
 
+struct spdk_nvmf_rdma_request_data {
+	struct spdk_nvmf_rdma_wr	rdma_wr;
+	struct ibv_send_wr		wr;
+	struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
+	void				*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
+};
+
 struct spdk_nvmf_rdma_request {
 	struct spdk_nvmf_request		req;
 	bool					data_from_pool;
@@ -225,12 +232,7 @@ struct spdk_nvmf_rdma_request {
 		struct ibv_sge		sgl[NVMF_DEFAULT_RSP_SGE];
 	} rsp;
 
-	struct {
-		struct spdk_nvmf_rdma_wr	rdma_wr;
-		struct ibv_send_wr		wr;
-		struct ibv_sge			sgl[NVMF_DEFAULT_TX_SGE];
-		void				*buffers[NVMF_DEFAULT_TX_SGE];
-	} data;
+	struct spdk_nvmf_rdma_request_data	data;
 
 	struct spdk_nvmf_rdma_wr		rdma_wr;
 
@@ -364,6 +366,8 @@ struct spdk_nvmf_rdma_transport {
 
 	struct rdma_event_channel	*event_channel;
 
+	struct spdk_mempool		*data_wr_pool;
+
 	pthread_mutex_t			lock;
 
 	/* fields used to poll RDMA/IB events */
@@ -1712,6 +1716,17 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
 
+	rtransport->data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
+				   opts->max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES,
+				   sizeof(struct spdk_nvmf_rdma_request_data),
+				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+				   SPDK_ENV_SOCKET_ID_ANY);
+	if (!rtransport->data_wr_pool) {
+		SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
+		return NULL;
+	}
+
 	contexts = rdma_get_devices(NULL);
 	if (contexts == NULL) {
 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
@@ -1850,6 +1865,16 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 		free(device);
 	}
 
+	if (rtransport->data_wr_pool != NULL) {
+		if (spdk_mempool_count(rtransport->data_wr_pool) !=
+		    (transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES)) {
+			SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
+				    spdk_mempool_count(rtransport->data_wr_pool),
+				    transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES);
+		}
+	}
+
+	spdk_mempool_free(rtransport->data_wr_pool);
 	spdk_io_device_unregister(rtransport, NULL);
 	pthread_mutex_destroy(&rtransport->lock);
 	free(rtransport);