nvmf/rdma: Separate filling wr->sg_list from filling req->iov for DIF case
This patch separates filling wr->sg_list from filling req->iov in
nvmf_rdma_fill_buffers_with_md_interleave() and creates a new helper
function nvmf_rdma_fill_wr_sgl_with_md_interleave() that fills
wr->sg_list, using iovpos of struct spdk_nvmf_rdma_request as the
cursor. Subsequent patches will merge nvmf_rdma_fill_buffers() into
spdk_nvmf_request_get_buffers().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I03206895e37cf385fb8bd7498f2f4a24797c7ce1
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469204
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 5593b61f93
commit b48a97d454
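As a minimal, self-contained sketch of the separation this commit moves
toward (hypothetical names and signatures, not SPDK code; the real
functions appear in the hunks below), step 1 binds pooled buffers to an
iovec array, and step 2 independently walks those iovecs to build a
scatter-gather list with its own cursor (iovpos), so SGL construction
can advance separately from buffer accounting (iovcnt):

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

/* Hypothetical stand-in for an RDMA scatter-gather element. */
struct sge_sketch {
	uint64_t addr;
	uint32_t length;
};

/* Step 1: fill iovecs from buffers; returns the number of iovecs used. */
static int
fill_iovs(struct iovec *iov, char **bufs, int nbufs, uint32_t io_unit_size,
	  uint32_t length)
{
	int iovcnt = 0;

	while (length > 0 && iovcnt < nbufs) {
		iov[iovcnt].iov_base = bufs[iovcnt];
		iov[iovcnt].iov_len = length < io_unit_size ? length : io_unit_size;
		length -= iov[iovcnt].iov_len;
		iovcnt++;
	}
	return iovcnt;
}

/* Step 2: map iovecs to SGEs, advancing the separate iovpos cursor. */
static int
fill_sgl(struct sge_sketch *sgl, int max_sge, const struct iovec *iov,
	 int iovcnt, int *iovpos)
{
	int num_sge = 0;

	while (*iovpos < iovcnt && num_sge < max_sge) {
		sgl[num_sge].addr = (uint64_t)(uintptr_t)iov[*iovpos].iov_base;
		sgl[num_sge].length = (uint32_t)iov[*iovpos].iov_len;
		(*iovpos)++;
		num_sge++;
	}
	return num_sge;
}

int
main(void)
{
	char a[4096], b[4096];
	char *bufs[] = { a, b };
	struct iovec iov[2];
	struct sge_sketch sgl[2];
	int iovpos = 0;

	int iovcnt = fill_iovs(iov, bufs, 2, 4096, 6000);
	int num_sge = fill_sgl(sgl, 2, iov, iovcnt, &iovpos);

	printf("iovcnt=%d num_sge=%d\n", iovcnt, num_sge);
	return 0;
}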
@@ -1562,11 +1562,12 @@ static bool
 nvmf_rdma_fill_wr_sge_with_md_interleave(struct spdk_nvmf_rdma_device *device,
					 struct spdk_nvmf_request *req,
					 struct ibv_send_wr *wr,
+					 int iovpos,
					 uint32_t *_remaining_data_block,
					 uint32_t *_offset,
					 const struct spdk_dif_ctx *dif_ctx)
 {
-	struct iovec *iov = &req->iov[req->iovcnt];
+	struct iovec *iov = &req->iov[iovpos];
	struct ibv_sge *sg_ele;
	uint32_t lkey = 0;
	uint32_t sge_len;
@@ -1616,39 +1617,33 @@ nvmf_rdma_fill_wr_sge_with_md_interleave(struct spdk_nvmf_rdma_device *device,
  * and points to part of buffer
  */
 static int
-nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtransport,
-		struct spdk_nvmf_rdma_poll_group *rgroup,
+nvmf_rdma_fill_wr_sgl_with_md_interleave(struct spdk_nvmf_rdma_poll_group *rgroup,
		struct spdk_nvmf_rdma_device *device,
-		struct spdk_nvmf_request *req,
+		struct spdk_nvmf_rdma_request *rdma_req,
		struct ibv_send_wr *wr,
		uint32_t length,
		const struct spdk_dif_ctx *dif_ctx)
 {
+	struct spdk_nvmf_request *req = &rdma_req->req;
	uint32_t remaining_length = length;
	uint32_t remaining_data_block = dif_ctx->block_size - dif_ctx->md_size;
	uint32_t offset = 0;
-	struct iovec *iov;

	wr->num_sge = 0;

	while (remaining_length && wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES) {
-		iov = &req->iov[req->iovcnt];
-		iov->iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] + NVMF_DATA_BUFFER_MASK)
-					 & ~NVMF_DATA_BUFFER_MASK);
-		iov->iov_len = spdk_min(remaining_length, rtransport->transport.opts.io_unit_size);
-
		while (spdk_unlikely(!nvmf_rdma_fill_wr_sge_with_md_interleave(device, req, wr,
-				&remaining_data_block, &offset, dif_ctx))) {
-			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
+				rdma_req->iovpos, &remaining_data_block, &offset, dif_ctx))) {
+			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[rdma_req->iovpos]) == -ENOMEM) {
				return -ENOMEM;
			}
-			req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
+			req->iov[rdma_req->iovpos].iov_base = (void *)((uintptr_t)(req->buffers[rdma_req->iovpos] +
					NVMF_DATA_BUFFER_MASK) &
					~NVMF_DATA_BUFFER_MASK);
		}

-		remaining_length -= iov->iov_len;
-		req->iovcnt++;
+		remaining_length -= req->iov[rdma_req->iovpos].iov_len;
+		rdma_req->iovpos++;
	}

	if (remaining_length) {
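The remaining_data_block initialization above encodes the DIF layout:
each on-wire block of dif_ctx->block_size bytes carries
block_size - md_size bytes of user data followed by md_size bytes of
interleaved metadata. A standalone arithmetic sketch of that sizing
(hypothetical helper, not SPDK code):

#include <stdint.h>
#include <stdio.h>

/*
 * With interleaved DIF metadata, a block_size-byte block carries
 * (block_size - md_size) bytes of user data, so this computes how many
 * blocks a given amount of user data spans.
 */
static uint32_t
count_dif_blocks(uint32_t data_len, uint32_t block_size, uint32_t md_size)
{
	uint32_t data_block_size = block_size - md_size;

	/* Round up: a partial final data block still occupies a block. */
	return (data_len + data_block_size - 1) / data_block_size;
}

int
main(void)
{
	/* Example: 520-byte blocks carrying 8 bytes of metadata each. */
	printf("4096 data bytes -> %u blocks\n", count_dif_blocks(4096, 520, 8));
	return 0;
}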
@@ -1746,16 +1741,12 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
	req->iovcnt = 0;
	rdma_req->iovpos = 0;

-	if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
-		rc = nvmf_rdma_fill_buffers_with_md_interleave(rtransport,
-				rgroup,
-				device,
-				req,
-				wr,
-				length,
-				&rdma_req->dif_ctx);
-	} else {
-		nvmf_rdma_fill_buffers(rtransport, req, length);
+	nvmf_rdma_fill_buffers(rtransport, req, length);
+
+	if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
+		rc = nvmf_rdma_fill_wr_sgl_with_md_interleave(rgroup, device, rdma_req,
+				wr, length, &rdma_req->dif_ctx);
+	} else {
		rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, wr, length);
	}
	if (rc != 0) {
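After this change, req->iovcnt is advanced only by nvmf_rdma_fill_buffers(),
while rdma_req->iovpos is the cursor the SGL builders consume, so the two
concerns no longer share a counter. That decoupling appears to be what lets
the buffer-filling step later be merged into the generic
spdk_nvmf_request_get_buffers(), as the commit message notes.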