nvme/pcie: split HW SGL entries on 2MB address boundary

This allows NVMe PCIe devices to be used with physically
discontiguous I/O payload buffers.

For now this is a naive split that does not check for
physical contiguity of adjacent entries; that check is
added in a subsequent patch.

Change-Id: I0ecc443149225eaa0e4156ddda78613bcf034406
Suggested-by: Daniel Verkamp <daniel.verkamp@intel.com>
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/417060
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Dariusz Stojaczyk 2018-06-27 20:51:37 +02:00 committed by Jim Harris
parent bdd0f6119f
commit 3320c06b4c

View File

@ -1761,6 +1761,8 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
return 0; return 0;
} }
#define _2MB_OFFSET(ptr) (((uintptr_t)(ptr)) & (0x200000 - 1))
/** /**
* Build SGL list describing scattered payload buffer. * Build SGL list describing scattered payload buffer.
*/ */
@ -1771,7 +1773,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
int rc; int rc;
void *virt_addr; void *virt_addr;
uint64_t phys_addr; uint64_t phys_addr;
uint32_t remaining_transfer_len, length; uint32_t remaining_transfer_len, remaining_user_sge_len, length;
struct spdk_nvme_sgl_descriptor *sgl; struct spdk_nvme_sgl_descriptor *sgl;
uint32_t nseg = 0; uint32_t nseg = 0;
@ -1791,13 +1793,17 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
remaining_transfer_len = req->payload_size; remaining_transfer_len = req->payload_size;
while (remaining_transfer_len > 0) { while (remaining_transfer_len > 0) {
if (nseg >= NVME_MAX_SGL_DESCRIPTORS) { rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg,
&virt_addr, &remaining_user_sge_len);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr); nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -1; return -1;
} }
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length); remaining_user_sge_len = spdk_min(remaining_user_sge_len, remaining_transfer_len);
if (rc) { remaining_transfer_len -= remaining_user_sge_len;
while (remaining_user_sge_len > 0) {
if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr); nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -1; return -1;
} }
@ -1808,8 +1814,9 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
return -1; return -1;
} }
length = spdk_min(remaining_transfer_len, length); length = spdk_min(remaining_user_sge_len, 0x200000 - _2MB_OFFSET(virt_addr));
remaining_transfer_len -= length; remaining_user_sge_len -= length;
virt_addr += length;
sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK; sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
sgl->unkeyed.length = length; sgl->unkeyed.length = length;
@ -1819,6 +1826,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
sgl++; sgl++;
nseg++; nseg++;
} }
}
if (nseg == 1) { if (nseg == 1) {
/* /*