nvme/rdma: cache value of bb_mr->rkey

Avoid an extra level of pointer chasing when we are filling out the NVMe
SGL.

Change-Id: I1a40af16fda80f7480c419524876bfb1a1902eb8
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-12-15 16:40:16 -07:00
parent 6152d5bf8f
commit e1b514ce9a

View File

@@ -123,6 +123,9 @@ struct spdk_nvme_rdma_req {
struct ibv_mr *bb_mr;
+ /* Cached value of bb_mr->rkey */
+ uint32_t bb_rkey;
uint8_t *bb;
STAILQ_ENTRY(spdk_nvme_rdma_req) link;
@@ -258,7 +261,7 @@ nvme_rdma_pre_copy_mem(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req
nvme_sgl = &cmd->dptr.sgl1;
nvme_sgl->address = (uint64_t)rdma_req->bb;
- nvme_sgl->keyed.key = rdma_req->bb_mr->rkey;
+ nvme_sgl->keyed.key = rdma_req->bb_rkey;
}
}
@@ -467,6 +470,8 @@ nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
goto fail;
}
+ rdma_req->bb_rkey = rdma_req->bb_mr->rkey;
STAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link);
}