nvme: optimize layout of struct nvme_payload

Rather than storing nvme_payload::type explicitly, use the SGL reset
function pointer as an indicator: if reset_sgl_fn is non-NULL, then the
payload is an SGL type; otherwise it is a contiguous buffer type.

This eliminates the one-byte type member from struct nvme_payload,
making it an even 32 bytes instead of 33, allowing the removal of the
awkward packing inside struct nvme_request.

Change-Id: If2a32437a23fe14eb5287e096ac060067296f1dd
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/413175
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Daniel Verkamp 2018-05-31 17:52:54 -07:00
parent caf85d8f23
commit 7dff719f7e
7 changed files with 68 additions and 71 deletions

View File

@@ -206,10 +206,10 @@ nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
assert(req->pid == getpid());
memcpy(req->user_buffer, req->payload.u.contig, req->payload_size);
memcpy(req->user_buffer, req->payload.contig_or_cb_arg, req->payload_size);
}
spdk_dma_free(req->payload.u.contig);
spdk_dma_free(req->payload.contig_or_cb_arg);
}
/* Call the user's original callback now that the buffer has been copied */

View File

@@ -135,50 +135,46 @@ enum spdk_nvme_ctrlr_flags {
/**
* Descriptor for a request data payload.
*
* This struct is arranged so that it fits nicely in struct nvme_request.
*/
struct __attribute__((packed)) nvme_payload {
union {
/** Virtual memory address of a single physically contiguous buffer */
void *contig;
struct nvme_payload {
/**
* Functions for retrieving physical addresses for scattered payloads.
*/
spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn;
/**
* Functions for retrieving physical addresses for scattered payloads.
*/
struct nvme_sgl_args {
spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn;
void *cb_arg;
} sgl;
} u;
/**
* If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
* virtual memory address of a single physically contiguous buffer.
*
* If reset_sgl_fn != NULL, this is a SGL payload, and contig_or_cb_arg contains the
* cb_arg that will be passed to the SGL callback functions.
*/
void *contig_or_cb_arg;
/** Virtual memory address of a single physically contiguous metadata buffer */
void *md;
/** \ref nvme_payload_type */
uint8_t type;
};
#define NVME_PAYLOAD_CONTIG(contig_, md_) \
(struct nvme_payload) { \
.u.contig = (contig_), \
.reset_sgl_fn = NULL, \
.next_sge_fn = NULL, \
.contig_or_cb_arg = (contig_), \
.md = (md_), \
.type = NVME_PAYLOAD_TYPE_CONTIG, \
}
#define NVME_PAYLOAD_SGL(reset_sgl_fn_, next_sge_fn_, cb_arg_, md_) \
(struct nvme_payload) { \
.u.sgl.reset_sgl_fn = (reset_sgl_fn_), \
.u.sgl.next_sge_fn = (next_sge_fn_), \
.u.sgl.cb_arg = (cb_arg_), \
.reset_sgl_fn = (reset_sgl_fn_), \
.next_sge_fn = (next_sge_fn_), \
.contig_or_cb_arg = (cb_arg_), \
.md = (md_), \
.type = NVME_PAYLOAD_TYPE_SGL, \
}
static inline enum nvme_payload_type
nvme_payload_type(const struct nvme_payload *payload) {
return payload->type;
return payload->reset_sgl_fn ? NVME_PAYLOAD_TYPE_SGL : NVME_PAYLOAD_TYPE_CONTIG;
}
struct nvme_request {

View File

@@ -234,7 +234,9 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
uint32_t io_flags, struct nvme_request *req,
uint16_t apptag_mask, uint16_t apptag)
{
struct nvme_sgl_args *args;
spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
void *sgl_cb_arg = req->payload.contig_or_cb_arg;
bool start_valid, end_valid, last_sge, child_equals_parent;
uint64_t child_lba = lba;
uint32_t req_current_length = 0;
@@ -243,10 +245,8 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
uint32_t page_size = qpair->ctrlr->page_size;
uintptr_t address;
args = &req->payload.u.sgl;
args->reset_sgl_fn(args->cb_arg, payload_offset);
args->next_sge_fn(args->cb_arg, (void **)&address, &sge_length);
reset_sgl_fn(sgl_cb_arg, payload_offset);
next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
while (req_current_length < req->payload_size) {
if (sge_length == 0) {
@@ -289,7 +289,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
child_length += sge_length;
req_current_length += sge_length;
if (req_current_length < req->payload_size) {
args->next_sge_fn(args->cb_arg, (void **)&address, &sge_length);
next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
}
/*
* If the next SGE is not page aligned, we will need to create a child
@@ -356,7 +356,9 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
uint32_t io_flags, struct nvme_request *req,
uint16_t apptag_mask, uint16_t apptag)
{
struct nvme_sgl_args *args;
spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
void *sgl_cb_arg = req->payload.contig_or_cb_arg;
uint64_t child_lba = lba;
uint32_t req_current_length = 0;
uint32_t child_length = 0;
@@ -364,14 +366,13 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
uint16_t max_sges, num_sges;
uintptr_t address;
args = &req->payload.u.sgl;
max_sges = ns->ctrlr->max_sges;
args->reset_sgl_fn(args->cb_arg, payload_offset);
reset_sgl_fn(sgl_cb_arg, payload_offset);
num_sges = 0;
while (req_current_length < req->payload_size) {
args->next_sge_fn(args->cb_arg, (void **)&address, &sge_length);
next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
if (req_current_length + sge_length > req->payload_size) {
sge_length = req->payload_size - req_current_length;

View File

@@ -1743,7 +1743,7 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
uint32_t prp_index = 0;
int rc;
rc = nvme_pcie_prp_list_append(tr, &prp_index, req->payload.u.contig + req->payload_offset,
rc = nvme_pcie_prp_list_append(tr, &prp_index, req->payload.contig_or_cb_arg + req->payload_offset,
req->payload_size, qpair->ctrlr->page_size);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
@@ -1772,9 +1772,9 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
*/
assert(req->payload_size != 0);
assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
assert(req->payload.u.sgl.reset_sgl_fn != NULL);
assert(req->payload.u.sgl.next_sge_fn != NULL);
req->payload.u.sgl.reset_sgl_fn(req->payload.u.sgl.cb_arg, req->payload_offset);
assert(req->payload.reset_sgl_fn != NULL);
assert(req->payload.next_sge_fn != NULL);
req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
sgl = tr->u.sgl;
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
@@ -1788,7 +1788,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
return -1;
}
rc = req->payload.u.sgl.next_sge_fn(req->payload.u.sgl.cb_arg, &virt_addr, &length);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -1;
@@ -1849,13 +1849,13 @@ nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvm
* Build scattered payloads.
*/
assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
assert(req->payload.u.sgl.reset_sgl_fn != NULL);
req->payload.u.sgl.reset_sgl_fn(req->payload.u.sgl.cb_arg, req->payload_offset);
assert(req->payload.reset_sgl_fn != NULL);
req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
remaining_transfer_len = req->payload_size;
while (remaining_transfer_len > 0) {
assert(req->payload.u.sgl.next_sge_fn != NULL);
rc = req->payload.u.sgl.next_sge_fn(req->payload.u.sgl.cb_arg, &virt_addr, &length);
assert(req->payload.next_sge_fn != NULL);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -1;

View File

@@ -905,7 +905,7 @@ nvme_rdma_build_null_request(struct nvme_request *req)
static int
nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair, struct nvme_request *req)
{
void *payload = req->payload.u.contig + req->payload_offset;
void *payload = req->payload.contig_or_cb_arg + req->payload_offset;
struct ibv_mr *mr;
assert(req->payload_size != 0);
@@ -939,12 +939,12 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair, struct nvme_request
assert(req->payload_size != 0);
assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
assert(req->payload.u.sgl.reset_sgl_fn != NULL);
assert(req->payload.u.sgl.next_sge_fn != NULL);
req->payload.u.sgl.reset_sgl_fn(req->payload.u.sgl.cb_arg, req->payload_offset);
assert(req->payload.reset_sgl_fn != NULL);
assert(req->payload.next_sge_fn != NULL);
req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
/* TODO: for now, we only support a single SGL entry */
rc = req->payload.u.sgl.next_sge_fn(req->payload.u.sgl.cb_arg, &virt_addr, &length);
rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
if (rc) {
return -1;
}

View File

@@ -609,7 +609,7 @@ test_nvme_allocate_request_null(void)
CU_ASSERT(req->pid == getpid());
CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
CU_ASSERT(req->payload.md == NULL);
CU_ASSERT(req->payload.u.contig == NULL);
CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
}
static void
@@ -703,8 +703,8 @@ test_nvme_allocate_request_user_copy(void)
CU_ASSERT(req->user_cb_arg == cb_arg);
CU_ASSERT(req->user_buffer == buffer);
CU_ASSERT(req->cb_arg == req);
CU_ASSERT(memcmp(req->payload.u.contig, buffer, payload_size) == 0);
spdk_dma_free(req->payload.u.contig);
CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
spdk_dma_free(req->payload.contig_or_cb_arg);
/* same thing but additional path coverage, no copy */
host_to_controller = false;
@@ -717,8 +717,8 @@ test_nvme_allocate_request_user_copy(void)
CU_ASSERT(req->user_cb_arg == cb_arg);
CU_ASSERT(req->user_buffer == buffer);
CU_ASSERT(req->cb_arg == req);
CU_ASSERT(memcmp(req->payload.u.contig, buffer, payload_size) != 0);
spdk_dma_free(req->payload.u.contig);
CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
spdk_dma_free(req->payload.contig_or_cb_arg);
/* good buffer and valid payload size but make spdk_dma_zmalloc fail */
/* set the mock pointer to NULL for spdk_dma_zmalloc */

View File

@@ -613,7 +613,7 @@ test_nvme_ns_cmd_dataset_management(void)
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == 0);
CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
/* TRIM 256 LBAs */
@@ -625,7 +625,7 @@ test_nvme_ns_cmd_dataset_management(void)
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == 255u);
CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
@@ -655,9 +655,9 @@ test_nvme_ns_cmd_readv(void)
SPDK_CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
CU_ASSERT(g_request->payload.u.sgl.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.u.sgl.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.u.sgl.cb_arg == &sge_length);
CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
CU_ASSERT(g_request->cmd.nsid == ns.id);
rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
@@ -690,9 +690,9 @@ test_nvme_ns_cmd_writev(void)
SPDK_CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
CU_ASSERT(g_request->payload.u.sgl.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.u.sgl.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.u.sgl.cb_arg == &sge_length);
CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
CU_ASSERT(g_request->cmd.nsid == ns.id);
rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
@@ -725,9 +725,9 @@ test_nvme_ns_cmd_comparev(void)
SPDK_CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
CU_ASSERT(g_request->payload.u.sgl.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.u.sgl.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.u.sgl.cb_arg == &sge_length);
CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
CU_ASSERT(g_request->cmd.nsid == ns.id);
rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
@@ -807,7 +807,7 @@ test_nvme_ns_cmd_reservation_register(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
free(payload);
cleanup_after_test(&qpair);
@@ -845,7 +845,7 @@ test_nvme_ns_cmd_reservation_release(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
free(payload);
cleanup_after_test(&qpair);
@@ -883,7 +883,7 @@ test_nvme_ns_cmd_reservation_acquire(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
free(payload);
cleanup_after_test(&qpair);
@@ -915,7 +915,7 @@ test_nvme_ns_cmd_reservation_report(void)
CU_ASSERT(g_request->cmd.cdw10 == (size / 4));
spdk_dma_free(g_request->payload.u.contig);
spdk_dma_free(g_request->payload.contig_or_cb_arg);
nvme_free_request(g_request);
free(payload);
cleanup_after_test(&qpair);