nvme: adjust physically contiguous memory comments

The NVMe PCIe transport only requires physically contiguous allocations
for struct nvme_tracker and the I/O SQ and CQ entries, which are already
handled separately.  Change the comments to indicate that struct
nvme_payload's contiguous type only requires the memory to be virtually
contiguous, since nvme_pcie_prp_list_append() already steps through the
buffer and translates each (4K) page independently.
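
As a sketch of the mechanism the message refers to: because each 4 KiB page of
the payload is translated on its own, the pages never need to be physically
adjacent. The helper below is hypothetical (build_prp_entries() is not an SPDK
function) and assumes the single-argument spdk_vtophys() of this era; the real
logic lives in nvme_pcie_prp_list_append().

#include <stdint.h>

#define PAGE_SIZE 4096

uint64_t spdk_vtophys(void *buf);	/* SPDK env virtual-to-physical translation */

/* Walk a virtually contiguous buffer one 4K page at a time, recording the
 * physical address of each page as a PRP entry.  Nothing here requires the
 * pages to be physically adjacent. */
static int
build_prp_entries(void *payload, uint32_t len, uint64_t *prp, uint32_t max_entries)
{
	uintptr_t va = (uintptr_t)payload;
	uintptr_t end = va + len;
	uint32_t n = 0;

	while (va < end) {
		if (n == max_entries) {
			return -1;	/* payload too large for this PRP list */
		}
		prp[n++] = spdk_vtophys((void *)va);	/* translate this page independently */
		/* Advance to the start of the next page; the first entry may begin mid-page. */
		va = (va & ~(uintptr_t)(PAGE_SIZE - 1)) + PAGE_SIZE;
	}
	return (int)n;
}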

Change-Id: I45ac8dfb2c033a0fcbf2effbe33af4efc1eb23cb
Reported-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/417045
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Daniel Verkamp 2018-06-27 08:57:14 -07:00 committed by Jim Harris
parent 95fe928067
commit 4af4e4f509
2 changed files with 9 additions and 9 deletions

@@ -166,7 +166,7 @@ nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
 }
 
 /**
- * Allocate a request as well as a physically contiguous buffer to copy to/from the user's buffer.
+ * Allocate a request as well as a DMA-capable buffer to copy to/from the user's buffer.
  *
  * This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
  * where the overhead of a copy is not a problem.
@@ -177,24 +177,24 @@ nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
 				void *cb_arg, bool host_to_controller)
 {
 	struct nvme_request *req;
-	void *contig_buffer = NULL;
+	void *dma_buffer = NULL;
 	uint64_t phys_addr;
 
 	if (buffer && payload_size) {
-		contig_buffer = spdk_dma_zmalloc(payload_size, 4096, &phys_addr);
-		if (!contig_buffer) {
+		dma_buffer = spdk_dma_zmalloc(payload_size, 4096, &phys_addr);
+		if (!dma_buffer) {
			return NULL;
		}
 
		if (host_to_controller) {
-			memcpy(contig_buffer, buffer, payload_size);
+			memcpy(dma_buffer, buffer, payload_size);
		}
	}
 
-	req = nvme_allocate_request_contig(qpair, contig_buffer, payload_size, nvme_user_copy_cmd_complete,
+	req = nvme_allocate_request_contig(qpair, dma_buffer, payload_size, nvme_user_copy_cmd_complete,
					   NULL);
	if (!req) {
-		spdk_dma_free(contig_buffer);
+		spdk_dma_free(dma_buffer);
		return NULL;
	}
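
Condensed into standalone form, the pattern above is a classic bounce buffer:
allocate DMA-capable memory, copy the caller's data in for host-to-controller
transfers, and hand the buffer to the request. The function below is a
hypothetical sketch, not SPDK code:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "spdk/env.h"	/* spdk_dma_zmalloc(), spdk_dma_free() */

/* Allocate a zeroed, 4K-aligned DMA-capable bounce buffer and, for
 * host-to-controller transfers, copy the user's data into it.  The caller
 * releases it with spdk_dma_free() once the command completes. */
static void *
alloc_bounce_buffer(const void *user_buf, uint32_t len, bool host_to_controller)
{
	uint64_t phys_addr;
	void *dma_buffer = spdk_dma_zmalloc(len, 4096, &phys_addr);

	if (!dma_buffer) {
		return NULL;
	}
	if (host_to_controller) {
		memcpy(dma_buffer, user_buf, len);
	}
	return dma_buffer;
}

For controller-to-host transfers, the reverse copy happens on completion, in
nvme_user_copy_cmd_complete() shown in the hunk context above.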

@@ -155,14 +155,14 @@ struct nvme_payload {
	/**
	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
-	 * virtual memory address of a single physically contiguous buffer.
+	 * virtual memory address of a single virtually contiguous buffer.
	 *
	 * If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
	 * cb_arg that will be passed to the SGL callback functions.
	 */
	void *contig_or_cb_arg;
 
-	/** Virtual memory address of a single physically contiguous metadata buffer */
+	/** Virtual memory address of a single virtually contiguous metadata buffer */
	void *md;
 };
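
The distinction these comments draw (contig vs. SGL payload) is encoded in
reset_sgl_fn, so a minimal check sketched against the struct as shown (the
helper name is hypothetical) looks like:

#include <stdbool.h>

/* A payload is "contig" when no SGL reset callback is registered; in that
 * case contig_or_cb_arg holds the buffer's virtual address, which only needs
 * to be virtually contiguous. */
static inline bool
nvme_payload_is_contig(const struct nvme_payload *payload)
{
	return payload->reset_sgl_fn == NULL;
}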