vtophys: add length parameter to the vtophys function

This follows the same trend as the mem_map APIs.

Currently, most of the spdk_vtophys() callers manually
detect physically noncontiguous buffers to split them
into multiple physically contiguous chunks. This patch
is a first step towards encapsulating most of that logic
in a single place - in spdk_vtophys() itself.

This patch doesn't change any functionality on its own,
it only extends the API.

Change-Id: I16faa9dea270c370f2a814cd399f59055b5ccc3d
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/c/438449
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: wuzhouhui <wuzhouhui@kingsoft.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Darek Stojaczyk 2018-12-28 12:59:21 +01:00 committed by Jim Harris
parent daa3b8b2e0
commit 8c4ed83b49
12 changed files with 95 additions and 33 deletions

View File

@@ -574,11 +574,14 @@ size_t spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count);
* Get the physical address of a buffer.
*
* \param buf A pointer to a buffer.
* \param size Contains the size of the memory region pointed to by buf.
* May be NULL. If buf is successfully translated, then this is updated with
* the size of the memory region for which the translation is valid.
*
* \return the physical address of this buffer on success, or SPDK_VTOPHYS_ERROR
* on failure.
*/
uint64_t spdk_vtophys(void *buf);
uint64_t spdk_vtophys(void *buf, uint64_t *size);
struct spdk_pci_addr {
uint32_t domain;

View File

@@ -626,7 +626,7 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
/* Set the mbuf elements address and length. Null out the next pointer. */
src_mbufs[crypto_index]->buf_addr = current_iov;
src_mbufs[crypto_index]->buf_iova = spdk_vtophys((void *)current_iov);
src_mbufs[crypto_index]->buf_iova = spdk_vtophys((void *)current_iov, NULL);
src_mbufs[crypto_index]->data_len = crypto_len;
src_mbufs[crypto_index]->next = NULL;
/* Store context in every mbuf as we don't know anything about completion order */
@@ -659,7 +659,8 @@ _crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation
/* Set the relevant destination en_mbuf elements. */
dst_mbufs[crypto_index]->buf_addr = io_ctx->cry_iov.iov_base + en_offset;
dst_mbufs[crypto_index]->buf_iova = spdk_vtophys(dst_mbufs[crypto_index]->buf_addr);
dst_mbufs[crypto_index]->buf_iova = spdk_vtophys(dst_mbufs[crypto_index]->buf_addr,
NULL);
dst_mbufs[crypto_index]->data_len = crypto_len;
crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
en_offset += crypto_len;

View File

@@ -59,7 +59,7 @@ virt_to_phys(void *vaddr)
}
#endif
return spdk_vtophys(vaddr);
return spdk_vtophys(vaddr, NULL);
}
void *

View File

@@ -624,13 +624,12 @@ spdk_vtophys_init(void)
}
uint64_t
spdk_vtophys(void *buf)
spdk_vtophys(void *buf, uint64_t *size)
{
uint64_t vaddr, paddr_2mb;
vaddr = (uint64_t)buf;
paddr_2mb = spdk_mem_map_translate(g_vtophys_map, vaddr, NULL);
paddr_2mb = spdk_mem_map_translate(g_vtophys_map, vaddr, size);
/*
* SPDK_VTOPHYS_ERROR has all bits set, so if the lookup returned SPDK_VTOPHYS_ERROR,
@@ -642,7 +641,7 @@ spdk_vtophys(void *buf)
if (paddr_2mb == SPDK_VTOPHYS_ERROR) {
return SPDK_VTOPHYS_ERROR;
} else {
return paddr_2mb + ((uint64_t)buf & MASK_2MB);
return paddr_2mb + (vaddr & MASK_2MB);
}
}

View File

@@ -432,7 +432,7 @@ ioat_channel_start(struct spdk_ioat_chan *ioat)
}
for (i = 0; i < num_descriptors; i++) {
phys_addr = spdk_vtophys(&ioat->hw_ring[i]);
phys_addr = spdk_vtophys(&ioat->hw_ring[i], NULL);
if (phys_addr == SPDK_VTOPHYS_ERROR) {
SPDK_ERRLOG("Failed to translate descriptor %u to physical address\n", i);
return -1;
@@ -611,12 +611,12 @@ spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_c
while (remaining) {
if (_2MB_PAGE(vsrc) != vsrc_page) {
vsrc_page = _2MB_PAGE(vsrc);
psrc_page = spdk_vtophys((void *)vsrc_page);
psrc_page = spdk_vtophys((void *)vsrc_page, NULL);
}
if (_2MB_PAGE(vdst) != vdst_page) {
vdst_page = _2MB_PAGE(vdst);
pdst_page = spdk_vtophys((void *)vdst_page);
pdst_page = spdk_vtophys((void *)vdst_page, NULL);
}
op_size = remaining;
op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
@@ -688,7 +688,7 @@ spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_c
remaining -= op_size;
last_desc = ioat_prep_fill(ioat,
spdk_vtophys((void *)vdst),
spdk_vtophys((void *)vdst, NULL),
fill_pattern,
op_size);

View File

@@ -140,7 +140,7 @@ spdk_mobj_ctor(struct spdk_mempool *mp, __attribute__((unused)) void *arg,
* right before the 512B aligned buffer area.
*/
phys_addr = (uint64_t *)m->buf - 1;
*phys_addr = spdk_vtophys(m) + off;
*phys_addr = spdk_vtophys(m, NULL) + off;
}
#define NUM_PDU_PER_CONNECTION(iscsi) (2 * (iscsi->MaxQueueDepth + MAX_LARGE_DATAIN_PER_CONNECTION + 8))

View File

@@ -59,7 +59,7 @@ spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
cmd->nsid = ns->id;
if (chunk_info != NULL) {
cmd->mptr = spdk_vtophys(chunk_info);
cmd->mptr = spdk_vtophys(chunk_info, NULL);
}
/*
@@ -70,7 +70,7 @@ spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
if (num_lbas == 1) {
*(uint64_t *)&cmd->cdw10 = *lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list);
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list, NULL);
}
cmd->cdw12 = num_lbas - 1;
@@ -120,7 +120,7 @@ _nvme_ocssd_ns_cmd_vector_rw_with_md(struct spdk_nvme_ns *ns,
if (num_lbas == 1) {
*(uint64_t *)&cmd->cdw10 = *lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list);
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list, NULL);
}
cmd->cdw12 = num_lbas - 1;
@@ -221,8 +221,8 @@ spdk_nvme_ocssd_ns_cmd_vector_copy(struct spdk_nvme_ns *ns,
*(uint64_t *)&cmd->cdw10 = *src_lba_list;
*(uint64_t *)&cmd->cdw14 = *dst_lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(src_lba_list);
*(uint64_t *)&cmd->cdw14 = spdk_vtophys(dst_lba_list);
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(src_lba_list, NULL);
*(uint64_t *)&cmd->cdw14 = spdk_vtophys(dst_lba_list, NULL);
}
cmd->cdw12 = num_lbas - 1;

View File

@@ -1052,7 +1052,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
for (i = 0; i < num_trackers; i++) {
tr = &pqpair->tr[i];
nvme_qpair_construct_tracker(tr, i, spdk_vtophys(tr));
nvme_qpair_construct_tracker(tr, i, spdk_vtophys(tr, NULL));
TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
}
@@ -1712,7 +1712,7 @@ nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *vi
return -EINVAL;
}
phys_addr = spdk_vtophys(virt_addr);
phys_addr = spdk_vtophys(virt_addr, NULL);
if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
SPDK_ERRLOG("vtophys(%p) failed\n", virt_addr);
return -EINVAL;
@@ -1821,7 +1821,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
return -1;
}
phys_addr = spdk_vtophys(virt_addr);
phys_addr = spdk_vtophys(virt_addr, NULL);
if (phys_addr == SPDK_VTOPHYS_ERROR) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -1;
@@ -1970,7 +1970,7 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques
if (req->payload_size && req->payload.md) {
md_payload = req->payload.md + req->md_offset;
tr->req->cmd.mptr = spdk_vtophys(md_payload);
tr->req->cmd.mptr = spdk_vtophys(md_payload, NULL);
if (tr->req->cmd.mptr == SPDK_VTOPHYS_ERROR) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
rc = -EINVAL;

View File

@@ -565,7 +565,7 @@ spdk_vhost_session_mem_unregister(struct spdk_vhost_session *vsession)
end = CEIL_2MB(region->mmap_addr + region->mmap_size);
len = end - start;
if (spdk_vtophys((void *) start) == SPDK_VTOPHYS_ERROR) {
if (spdk_vtophys((void *) start, NULL) == SPDK_VTOPHYS_ERROR) {
continue; /* region has not been registered */
}

View File

@@ -535,7 +535,7 @@ virtqueue_req_add_iovs(struct virtqueue *vq, struct iovec *iovs, uint16_t iovcnt
if (!vq->vdev->is_hw) {
desc->addr = (uintptr_t)iovs[i].iov_base;
} else {
desc->addr = spdk_vtophys(iovs[i].iov_base);
desc->addr = spdk_vtophys(iovs[i].iov_base, NULL);
}
desc->len = iovs[i].iov_len;

View File

@@ -163,7 +163,7 @@ spdk_dma_free(void *buf)
DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
uint64_t
spdk_vtophys(void *buf)
spdk_vtophys(void *buf, uint64_t *size)
{
HANDLE_RETURN_MOCK(spdk_vtophys);

View File

@@ -34,6 +34,7 @@
#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/util.h"
#include "CUnit/Basic.h"
@@ -52,7 +53,7 @@ vtophys_malloc_test(void)
continue;
}
paddr = spdk_vtophys(p);
paddr = spdk_vtophys(p, NULL);
CU_ASSERT(paddr == SPDK_VTOPHYS_ERROR);
free(p);
@@ -60,29 +61,87 @@
}
/* Test addresses that are not in the valid x86-64 usermode range */
paddr = spdk_vtophys((void *)0x0000800000000000ULL);
paddr = spdk_vtophys((void *)0x0000800000000000ULL, NULL);
CU_ASSERT(paddr == SPDK_VTOPHYS_ERROR)
}
static void
vtophys_spdk_malloc_test(void)
{
void *p = NULL;
void *buf = NULL, *p = NULL;
size_t buf_align = 512;
int i;
unsigned int size = 1;
uint64_t paddr;
uint64_t paddr, tmpsize;
/* Test vtophys on memory allocated through SPDK */
for (i = 0; i < 31; i++) {
p = spdk_dma_zmalloc(size, 512, NULL);
if (p == NULL) {
buf = spdk_dma_zmalloc(size, buf_align, NULL);
if (buf == NULL) {
continue;
}
paddr = spdk_vtophys(p);
/* test vtophys translation with no length parameter */
paddr = spdk_vtophys(buf, NULL);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
spdk_dma_free(p);
/* translate the entire buffer; it's not necessarily contiguous */
p = buf;
tmpsize = size;
while (p < buf + size) {
paddr = spdk_vtophys(p, &tmpsize);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
CU_ASSERT(tmpsize >= spdk_min(size, buf_align));
p += tmpsize;
tmpsize = buf + size - p;
}
CU_ASSERT(tmpsize == 0);
/* translate a valid vaddr, but with length 0 */
p = buf;
tmpsize = 0;
paddr = spdk_vtophys(p, &tmpsize);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
CU_ASSERT(tmpsize == 0);
/* translate the first half of the buffer */
p = buf;
tmpsize = size / 2;
while (p < buf + size / 2) {
paddr = spdk_vtophys(p, &tmpsize);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
CU_ASSERT(tmpsize >= spdk_min(size / 2, buf_align));
p += tmpsize;
tmpsize = buf + size / 2 - p;
}
CU_ASSERT(tmpsize == 0);
/* translate the second half of the buffer */
p = buf + size / 2;
tmpsize = size / 2;
while (p < buf + size) {
paddr = spdk_vtophys(p, &tmpsize);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
CU_ASSERT(tmpsize >= spdk_min(size / 2, buf_align));
p += tmpsize;
tmpsize = buf + size - p;
}
CU_ASSERT(tmpsize == 0);
/* translate a region that's not entirely registered */
p = buf;
tmpsize = UINT64_MAX;
while (p < buf + size) {
paddr = spdk_vtophys(p, &tmpsize);
CU_ASSERT(paddr != SPDK_VTOPHYS_ERROR);
CU_ASSERT(tmpsize >= buf_align);
p += tmpsize;
/* verify our region is really contiguous */
CU_ASSERT(paddr + tmpsize - 1 == spdk_vtophys(p - 1, &tmpsize));
tmpsize = UINT64_MAX;
}
spdk_dma_free(buf);
size = size << 1;
}
}