From 89a28bfd1b1c8af32ce58f642c66b0c8b17b7035 Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Mon, 23 Sep 2019 07:22:10 +0900
Subject: [PATCH] nvmf/rdma: Factor out WR SGE setup in fill_buffers() into
 fill_wr_sge()

Factor out the WR SGE setup from nvmf_rdma_fill_buffers() into a new
function, nvmf_rdma_fill_wr_sge().

Signed-off-by: Shuhei Matsumoto
Change-Id: I813f156b83b6e1773ea76d0d1ed8684b1e267691
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468945
Tested-by: SPDK CI Jenkins
Reviewed-by: Alexey Marchuk
Reviewed-by: Ben Walker
Reviewed-by: Jim Harris
Reviewed-by: Seth Howell
---
 lib/nvmf/rdma.c | 48 ++++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 267f5708e..835900496 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -1618,6 +1618,35 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
 	return 0;
 }
 
+static bool
+nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
+		      struct spdk_nvmf_request *req, struct ibv_send_wr *wr)
+{
+	uint64_t translation_len;
+
+	translation_len = req->iov[req->iovcnt].iov_len;
+
+	if (!g_nvmf_hooks.get_rkey) {
+		wr->sg_list[wr->num_sge].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
+						 (uint64_t)req->iov[req->iovcnt].iov_base, &translation_len))->lkey;
+	} else {
+		wr->sg_list[wr->num_sge].lkey = spdk_mem_map_translate(device->map,
+						(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len);
+	}
+
+	if (spdk_unlikely(translation_len < req->iov[req->iovcnt].iov_len)) {
+		/* This is a very rare case that can occur when using DPDK version < 19.05 */
+		SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
+		return false;
+	}
+
+	wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
+	wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
+	wr->num_sge++;
+
+	return true;
+}
+
 static int
 nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
 		       struct spdk_nvmf_rdma_poll_group *rgroup,
@@ -1626,8 +1655,6 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
 		       struct ibv_send_wr *wr,
 		       uint32_t length)
 {
-	uint64_t translation_len;
-
 	wr->num_sge = 0;
 	while (length) {
 		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
@@ -1635,19 +1662,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
 						  ~NVMF_DATA_BUFFER_MASK);
 		req->iov[req->iovcnt].iov_len = spdk_min(length, rtransport->transport.opts.io_unit_size);
 
-		translation_len = req->iov[req->iovcnt].iov_len;
-
-		if (!g_nvmf_hooks.get_rkey) {
-			wr->sg_list[wr->num_sge].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
-							 (uint64_t)req->iov[req->iovcnt].iov_base, &translation_len))->lkey;
-		} else {
-			wr->sg_list[wr->num_sge].lkey = spdk_mem_map_translate(device->map,
-							(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len);
-		}
-
-		/* This is a very rare case that can occur when using DPDK version < 19.05 */
-		if (spdk_unlikely(translation_len < req->iov[req->iovcnt].iov_len)) {
-			SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
+		if (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, req, wr))) {
 			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
 				return -ENOMEM;
 			}
@@ -1655,10 +1670,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
 		}
 
 		length -= req->iov[req->iovcnt].iov_len;
-		wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
-		wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
 		req->iovcnt++;
-		wr->num_sge++;
 	}
 
 	return 0;
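For readers who want the resulting control flow at a glance, here is a minimal,
self-contained sketch of the shape of nvmf_rdma_fill_buffers() after this patch.
Everything in it (fake_request, buf_idx, translate_buffer(), replace_buffer()) is a
simplified stand-in invented for illustration only; just the while loop and the
bool-returning SGE helper mirror the real SPDK code in the diff above.

/*
 * Simplified, standalone illustration of the refactored control flow.
 * The types and helpers below are NOT the SPDK API; they are invented
 * stand-ins so the sketch compiles and runs on its own.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sge {
	uint64_t addr;
	uint32_t length;
};

struct fake_request {
	struct sge sg_list[4];
	int num_sge;
};

/* Stand-in for the memory-map translation: pretend every buffer is in one
 * contiguous registered region except buffer index 1. */
static uint32_t
translate_buffer(int buf_idx, uint32_t len)
{
	return (buf_idx == 1) ? len / 2 : len;
}

/* Mirrors the role of nvmf_rdma_fill_wr_sge(): fill one SGE, or report
 * failure when the buffer straddles a region boundary. */
static bool
fill_wr_sge(struct fake_request *req, int buf_idx, uint32_t len)
{
	uint32_t translation_len = translate_buffer(buf_idx, len);

	if (translation_len < len) {
		/* Caller will swap in a different buffer and retry. */
		return false;
	}

	req->sg_list[req->num_sge].addr = (uint64_t)buf_idx * 0x1000;
	req->sg_list[req->num_sge].length = len;
	req->num_sge++;
	return true;
}

/* Stand-in for nvmf_rdma_replace_buffer(): pick a new buffer index. */
static int
replace_buffer(int *buf_idx)
{
	*buf_idx += 10;	/* pretend a fresh buffer came from the pool */
	return 0;
}

int
main(void)
{
	struct fake_request req = { .num_sge = 0 };
	uint32_t length = 3 * 4096;
	int buf_idx = 0;

	/* Mirrors the while (length) loop in nvmf_rdma_fill_buffers(). */
	while (length) {
		uint32_t chunk = length < 4096 ? length : 4096;

		if (!fill_wr_sge(&req, buf_idx, chunk)) {
			if (replace_buffer(&buf_idx) != 0) {
				return -1;
			}
			continue;	/* retry the same chunk with the new buffer */
		}

		length -= chunk;
		buf_idx++;
	}

	printf("built %d SGEs\n", req.num_sge);
	return 0;
}

The point of the refactor is visible here: the loop body no longer knows how an
SGE is translated and filled; it only decides what to do when the helper reports
that the buffer cannot be used.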