nvmf/rdma: Cleanup nvmf_rdma_fill_buffers_with_md_interleave()
This patch makes the following changes to nvmf_rdma_fill_buffers_with_md_interleave():

- applies nvmf_rdma_get_lkey(),
- renames the pointer to struct iovec from iovec to iov,
- renames the pointer to struct ibv_sge from sg_list to sg_ele,
- passes the DIF context instead of the decoded data block size and metadata size,
- uses the cached pointer to struct spdk_nvmf_request at the call site, and
- slightly reorders the operations that set up sg_ele.

The name changes come from the previous patch. They are for consistency with nvmf_rdma_fill_buffers() and a preparation for the next patch.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I942fb9d07db52b9ef9f43fdfa8235a9e864964c0
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469201
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
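For readers decoding the dif_ctx change: with interleaved DIF, each extended block carries md_size bytes of metadata, so the usable payload per block is block_size - md_size, which is exactly what the function now derives internally instead of taking as parameters. A small self-contained illustration (the 512+8 format below is an illustrative assumption, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative extended-LBA format: 512 data bytes + 8 DIF bytes. */
	uint32_t block_size = 520;
	uint32_t md_size = 8;

	/* The same decoding the patched function now performs from dif_ctx. */
	uint32_t data_block_size = block_size - md_size;

	printf("data_block_size = %u\n", data_block_size); /* prints 512 */
	return 0;
}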
parent c2f60ea452
commit 8bbde3758a
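The diff below replaces the inlined memory-map translation with nvmf_rdma_get_lkey(), which the previous patch introduced and which is not shown here. As a rough sketch only: folding the removed lines into a helper with the bool contract the new call site implies would look something like the following (SPDK declarations such as spdk_mem_map_translate() and g_nvmf_hooks are assumed to be in scope):

/* Sketch reconstructed from the removed code below, not the authoritative
 * SPDK implementation. Returns false when the iov is not fully covered by
 * a single registered memory region. */
static bool
nvmf_rdma_get_lkey(struct spdk_nvmf_rdma_device *device, struct iovec *iov,
		   uint32_t *lkey)
{
	uint64_t translation_len = iov->iov_len;

	if (!g_nvmf_hooks.get_rkey) {
		/* Default path: the map stores struct ibv_mr pointers. */
		*lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
				(uint64_t)iov->iov_base, &translation_len))->lkey;
	} else {
		/* Hook path: the map stores the key values directly. */
		*lkey = spdk_mem_map_translate(device->map,
				(uint64_t)iov->iov_base, &translation_len);
	}

	/* A short translation means the buffer spans multiple regions,
	 * the rare DPDK < 19.05 case handled by the caller's error path. */
	return translation_len >= iov->iov_len;
}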
@@ -1567,37 +1567,30 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
 					   struct spdk_nvmf_request *req,
 					   struct ibv_send_wr *wr,
 					   uint32_t length,
-					   uint32_t data_block_size,
-					   uint32_t md_size)
+					   const struct spdk_dif_ctx *dif_ctx)
 {
 	uint32_t remaining_length = length;
 	uint32_t remaining_io_buffer_length;
+	uint32_t data_block_size = dif_ctx->block_size - dif_ctx->md_size;
+	uint32_t md_size = dif_ctx->md_size;
 	uint32_t remaining_data_block = data_block_size;
 	uint32_t offset = 0;
 	uint32_t sge_len;
-	uint64_t translation_len;
-	struct iovec *iovec;
-	struct ibv_sge *sg_list;
+	struct iovec *iov;
+	struct ibv_sge *sg_ele;
 	uint32_t lkey = 0;
 
 	wr->num_sge = 0;
 
 	while (remaining_length && wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES) {
-		iovec = &req->iov[req->iovcnt];
-		iovec->iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] + NVMF_DATA_BUFFER_MASK)
+		iov = &req->iov[req->iovcnt];
+		iov->iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] + NVMF_DATA_BUFFER_MASK)
 					   & ~NVMF_DATA_BUFFER_MASK);
-		iovec->iov_len = spdk_min(remaining_length, rtransport->transport.opts.io_unit_size);
-		remaining_io_buffer_length = iovec->iov_len - offset;
-		translation_len = iovec->iov_len;
+		iov->iov_len = spdk_min(remaining_length, rtransport->transport.opts.io_unit_size);
+		remaining_io_buffer_length = iov->iov_len - offset;
 
-		if (!g_nvmf_hooks.get_rkey) {
-			lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map, (uint64_t)iovec->iov_base,
-					&translation_len))->lkey;
-		} else {
-			lkey = spdk_mem_map_translate(device->map, (uint64_t)iovec->iov_base, &translation_len);
-		}
-		/* This is a very rare case that can occur when using DPDK version < 19.05 */
-		if (spdk_unlikely(translation_len < iovec->iov_len)) {
+		if (spdk_unlikely(!nvmf_rdma_get_lkey(device, iov, &lkey))) {
+			/* This is a very rare case that can occur when using DPDK version < 19.05 */
 			SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
 			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
 				return -ENOMEM;
@@ -1608,11 +1601,11 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
 		req->iovcnt++;
 
 		while (remaining_io_buffer_length && wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES) {
-			sg_list = &wr->sg_list[wr->num_sge];
-			sg_list->addr = (uintptr_t)((char *) iovec->iov_base + offset);
+			sg_ele = &wr->sg_list[wr->num_sge];
+			sg_ele->lkey = lkey;
+			sg_ele->addr = (uintptr_t)((char *)iov->iov_base + offset);
 			sge_len = spdk_min(remaining_io_buffer_length, remaining_data_block);
-			sg_list->length = sge_len;
-			sg_list->lkey = lkey;
+			sg_ele->length = sge_len;
 			remaining_io_buffer_length -= sge_len;
 			remaining_data_block -= sge_len;
 			offset += sge_len;
@@ -1629,10 +1622,10 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
 			if (remaining_io_buffer_length == 0) {
 				/* By subtracting the size of the last IOV from the offset, we ensure that we skip
 				   the remaining metadata bits at the beginning of the next buffer */
-				offset -= iovec->iov_len;
+				offset -= iov->iov_len;
 			}
 		}
-		remaining_length -= iovec->iov_len;
+		remaining_length -= iov->iov_len;
 	}
 
 	if (remaining_length) {
@@ -1718,11 +1711,10 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 		rc = nvmf_rdma_fill_buffers_with_md_interleave(rtransport,
 							       rgroup,
 							       device,
-							       &rdma_req->req,
+							       req,
 							       wr,
 							       length,
-							       rdma_req->dif_ctx.block_size - rdma_req->dif_ctx.md_size,
-							       rdma_req->dif_ctx.md_size);
+							       &rdma_req->dif_ctx);
 	} else {
 		rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, req, wr, length);
 	}