nvmf/rdma: Move nvmf_rdma_get_lkey() up in the file

This reduces the diff in the next patch.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I85dccdc1a1a5a51777934121f50a6af97feda5a5
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469480
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-09-26 07:26:19 +09:00 committed by Jim Harris
parent 8f80af4c2a
commit c2f60ea452

View File

@@ -1531,6 +1531,31 @@ nvmf_rdma_replace_buffer(struct spdk_nvmf_rdma_poll_group *rgroup, void **buf)
return 0;
}
/*
 * Look up the local key (lkey) covering the memory described by iov.
 *
 * Returns true and stores the key through _lkey on success; returns false
 * when the registered region is smaller than iov->iov_len (i.e. the buffer
 * is not fully covered by a single translation).
 */
static bool
nvmf_rdma_get_lkey(struct spdk_nvmf_rdma_device *device, struct iovec *iov,
		   uint32_t *_lkey)
{
	uint64_t remaining = iov->iov_len;
	uint32_t key;

	if (g_nvmf_hooks.get_rkey) {
		/* With user hooks installed, the map appears to hold the key
		 * value itself — the translation result is used directly. */
		key = spdk_mem_map_translate(device->map,
					     (uint64_t)iov->iov_base, &remaining);
	} else {
		/* Without hooks, the map holds struct ibv_mr pointers. */
		key = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
				(uint64_t)iov->iov_base, &remaining))->lkey;
	}

	if (spdk_unlikely(remaining < iov->iov_len)) {
		/* Registration ends before the buffer does. */
		return false;
	}

	*_lkey = key;
	return true;
}
/*
* Fills iov and SGL, iov[i] points to buffer[i], SGE[i] is limited in length to data block size
* and points to part of buffer
@@ -1618,31 +1643,6 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
return 0;
}
static bool
nvmf_rdma_get_lkey(struct spdk_nvmf_rdma_device *device, struct iovec *iov,
uint32_t *_lkey)
{
uint64_t translation_len;
uint32_t lkey;
translation_len = iov->iov_len;
if (!g_nvmf_hooks.get_rkey) {
lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
(uint64_t)iov->iov_base, &translation_len))->lkey;
} else {
lkey = spdk_mem_map_translate(device->map,
(uint64_t)iov->iov_base, &translation_len);
}
if (spdk_unlikely(translation_len < iov->iov_len)) {
return false;
}
*_lkey = lkey;
return true;
}
static bool
nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
struct spdk_nvmf_request *req, struct ibv_send_wr *wr)