NVMe-oF: Add explicit reports for MR-split buffers:

This is a failsafe for detecting and reporting data buffers that span
multiple RDMA Memory Regions. These errors should never occur in
practice, but detecting and reporting them will aid any future debugging.

Change-Id: I3c61e3cc510f5a36039fc1815ff0de45fce794d5
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/436054
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Seth Howell 2018-12-04 11:01:15 -07:00 committed by Jim Harris
parent a52fc70d51
commit a451c8385e
2 changed files with 23 additions and 6 deletions

View File

@ -918,6 +918,9 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
(uint64_t)payload, &requested_size);
if (mr == NULL || requested_size < req->payload_size) {
if (mr) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
}
return -EINVAL;
}
@ -978,6 +981,7 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
}
if (requested_size < req->payload_size) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
return -1;
}
@ -1048,6 +1052,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
}
if (mr_length < sge_length) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
return -1;
}
@ -1140,6 +1145,9 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
mr = (struct ibv_mr *)spdk_mem_map_translate(rqpair->mr_map->map, (uint64_t)virt_addr,
&requested_size);
if (mr == NULL || requested_size < req->payload_size) {
if (mr) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
}
return -1;
}

View File

@ -1147,13 +1147,16 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
{
void *buf = NULL;
uint32_t length = rdma_req->req.length;
uint64_t translation_len;
uint32_t i = 0;
int rc = 0;
rdma_req->req.iovcnt = 0;
while (length) {
buf = spdk_mempool_get(rtransport->data_buf_pool);
if (!buf) {
goto nomem;
rc = -ENOMEM;
goto err_exit;
}
rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
@ -1163,18 +1166,24 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
rdma_req->data.buffers[i] = buf;
rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base);
rdma_req->data.wr.sg_list[i].length = rdma_req->req.iov[i].iov_len;
translation_len = rdma_req->req.iov[i].iov_len;
rdma_req->data.wr.sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
(uint64_t)buf, NULL))->lkey;
(uint64_t)buf, &translation_len))->lkey;
length -= rdma_req->req.iov[i].iov_len;
if (translation_len < rdma_req->req.iov[i].iov_len) {
SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
rc = -EINVAL;
goto err_exit;
}
i++;
}
rdma_req->data_from_pool = true;
return 0;
return rc;
nomem:
err_exit:
while (i) {
i--;
spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
@ -1186,7 +1195,7 @@ nomem:
rdma_req->data.wr.sg_list[i].lkey = 0;
}
rdma_req->req.iovcnt = 0;
return -ENOMEM;
return rc;
}
static int