nvme: combine various payload types into a struct

This cleans up the I/O splitting code somewhat.

It also moves the SGL payload function pointers up into the hot cache
section of struct nvme_request without pushing the other important
members past the cacheline boundary (because payload is now a union).

Change-Id: I14a5c24f579d57bb84d845147d03aa53bb4bb209
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp
Date:   2016-01-22 16:56:20 -07:00
Commit: 407a57165d
Parent: 82db40dbd5

9 changed files with 222 additions and 126 deletions
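For orientation before the per-file diffs, here is a minimal sketch of the payload descriptor this commit introduces and how a caller fills it in for the two payload kinds. The callback typedefs and the two make_* helpers below are simplified, hypothetical stand-ins for illustration only; the actual definitions are in the nvme_internal.h hunk further down.

#include <stdint.h>

/* Sketch only: simplified stand-ins for the SGL callback typedefs. */
typedef void (*nvme_req_reset_sgl_fn_t)(void *cb_arg, uint32_t offset);
typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *address, uint32_t *length);

enum nvme_payload_type {
	NVME_PAYLOAD_TYPE_INVALID = 0,
	NVME_PAYLOAD_TYPE_CONTIG,	/* u.contig is valid */
	NVME_PAYLOAD_TYPE_SGL,		/* u.sgl is valid */
};

/*
 * One descriptor covers both payload kinds; the union keeps it small enough
 * to sit in the hot cache section of struct nvme_request alongside the command.
 */
struct nvme_payload {
	union {
		void *contig;			/* single physically contiguous buffer */
		struct {
			nvme_req_reset_sgl_fn_t reset_sgl_fn;
			nvme_req_next_sge_fn_t next_sge_fn;
		} sgl;				/* callbacks describing a scattered payload */
	} u;
	uint8_t type;				/* enum nvme_payload_type */
};

/* Hypothetical helper: describe a contiguous buffer. */
static struct nvme_payload make_contig_payload(void *buffer)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	return payload;
}

/* Hypothetical helper: describe a scattered payload via SGL callbacks. */
static struct nvme_payload make_sgl_payload(nvme_req_reset_sgl_fn_t reset_sgl_fn,
					    nvme_req_next_sge_fn_t next_sge_fn)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
	payload.u.sgl.next_sge_fn = next_sge_fn;
	return payload;
}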


@@ -122,7 +122,7 @@ nvme_request_size(void)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
 		      nvme_cb_fn_t cb_fn, void *cb_arg)
 {
 	struct nvme_request *req = NULL;
@@ -145,15 +145,30 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
 	req->timeout = true;
-	req->sgl_offset = 0;
 	req->parent = NULL;
-	req->u.payload = payload;
+	req->payload = *payload;
 	req->payload_size = payload_size;
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_free_request(struct nvme_request *req)
 {


@@ -608,7 +608,7 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 
 	aer->ctrlr = ctrlr;
-	req = nvme_allocate_request(NULL, 0, nvme_ctrlr_async_event_cb, aer);
+	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
 	aer->req = req;
 	if (req == NULL) {
 		return -1;


@@ -41,7 +41,7 @@ nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
 {
 	struct nvme_request *req;
 
-	req = nvme_allocate_request(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
 
 	if (req == NULL) {
 		return ENOMEM;
@@ -62,7 +62,7 @@ nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(buf, len, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -83,9 +83,9 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_controller_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_controller_data),
+					   cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
@@ -106,9 +106,9 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_namespace_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_namespace_data),
+					   cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_IDENTIFY;
@@ -129,7 +129,7 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_CQ;
@@ -156,7 +156,7 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_CREATE_IO_SQ;
@@ -182,7 +182,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -209,7 +209,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -259,7 +259,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
 	struct nvme_command *cmd;
 
 	nvme_mutex_lock(&ctrlr->ctrlr_lock);
-	req = nvme_allocate_request(payload, payload_size, cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
 	if (req == NULL) {
 		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 		return ENOMEM;
@@ -284,7 +284,7 @@ nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 
 	cmd = &req->cmd;
 	cmd->opc = NVME_OPC_ABORT;


@@ -105,12 +105,46 @@
  */
 #define DEFAULT_MAX_IO_QUEUES		(1024)
 
+enum nvme_payload_type {
+	NVME_PAYLOAD_TYPE_INVALID = 0,
+
+	/** nvme_request::u.payload.contig_buffer is valid for this request */
+	NVME_PAYLOAD_TYPE_CONTIG,
+
+	/** nvme_request::u.sgl is valid for this request */
+	NVME_PAYLOAD_TYPE_SGL,
+};
+
+/**
+ * Descriptor for a request data payload.
+ *
+ * This struct is arranged so that it fits nicely in struct nvme_request.
+ */
+struct __attribute__((packed)) nvme_payload {
+	union {
+		/** Virtual memory address of a single physically contiguous buffer */
+		void *contig;
+
+		/**
+		 * Functions for retrieving physical addresses for scattered payloads.
+		 */
+		struct {
+			nvme_req_reset_sgl_fn_t reset_sgl_fn;
+			nvme_req_next_sge_fn_t next_sge_fn;
+		} sgl;
+	} u;
+
+	/** \ref nvme_payload_type */
+	uint8_t type;
+};
+
 struct nvme_request {
 	struct nvme_command		cmd;
 
-	union {
-		void			*payload;
-	} u;
+	/**
+	 * Data payload for this request's command.
+	 */
+	struct nvme_payload		payload;
 
 	uint8_t				timeout;
 	uint8_t				retries;
@@ -121,6 +155,13 @@ struct nvme_request {
 	 */
 	uint8_t				num_children;
 	uint32_t			payload_size;
+
+	/**
+	 * Offset in bytes from the beginning of payload for this request.
+	 * This is used for I/O commands that are split into multiple requests.
+	 */
+	uint32_t			payload_offset;
+
 	nvme_cb_fn_t			cb_fn;
 	void				*cb_arg;
 	STAILQ_ENTRY(nvme_request)	stailq;
@@ -159,13 +200,6 @@ struct nvme_request {
 	 * status once all child requests are completed.
 	 */
 	struct nvme_completion		parent_status;
-
-	/**
-	 * Functions for retrieving physical addresses for scattered payloads.
-	 */
-	nvme_req_reset_sgl_fn_t		reset_sgl_fn;
-	nvme_req_next_sge_fn_t		next_sge_fn;
-	uint32_t			sgl_offset;
 };
 
 struct nvme_completion_poll_status {
@@ -397,9 +431,11 @@ int nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
 		      struct nvme_controller *ctrlr);
 void	nvme_ns_destruct(struct nvme_namespace *ns);
 
-struct nvme_request *
-	nvme_allocate_request(void *payload, uint32_t payload_size,
-			      nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
+		uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg);
+struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
+		nvme_cb_fn_t cb_fn, void *cb_arg);
 void	nvme_free_request(struct nvme_request *req);
 
 #endif /* __NVME_INTERNAL_H__ */


@@ -38,12 +38,10 @@
  *
  */
 
-static struct nvme_request *
-_nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, uint32_t io_flags,
-		nvme_req_reset_sgl_fn_t reset_sgl_fn,
-		nvme_req_next_sge_fn_t next_sge_fn);
+static struct nvme_request *_nvme_ns_cmd_rw(struct nvme_namespace *ns,
+		const struct nvme_payload *payload, uint64_t lba,
+		uint32_t lba_count, nvme_cb_fn_t cb_fn,
+		void *cb_arg, uint32_t opc, uint32_t io_flags);
 
 static void
 nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl)
@@ -89,13 +87,12 @@ nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
+_nvme_ns_cmd_split_request(struct nvme_namespace *ns,
+			   const struct nvme_payload *payload,
 			   uint64_t lba, uint32_t lba_count,
 			   nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
 			   uint32_t io_flags, struct nvme_request *req,
-			   uint32_t sectors_per_max_io, uint32_t sector_mask,
-			   nvme_req_reset_sgl_fn_t reset_sgl_fn,
-			   nvme_req_next_sge_fn_t next_sge_fn)
+			   uint32_t sectors_per_max_io, uint32_t sector_mask)
 {
 	uint32_t sector_size = ns->sector_size;
 	uint32_t remaining_lba_count = lba_count;
@@ -107,30 +104,25 @@ _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
 		lba_count = nvme_min(remaining_lba_count, lba_count);
 
 		child = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn,
-					cb_arg, opc, io_flags, reset_sgl_fn, next_sge_fn);
+					cb_arg, opc, io_flags);
 		if (child == NULL) {
 			nvme_free_request(req);
 			return NULL;
 		}
+		child->payload_offset = offset;
 		nvme_request_add_child(req, child);
 		remaining_lba_count -= lba_count;
 		lba += lba_count;
-		if (req->u.payload == NULL) {
-			child->sgl_offset = offset;
-			offset += lba_count * ns->sector_size;
-		} else
-			payload = (void *)((uintptr_t)payload + (lba_count * sector_size));
+		offset += lba_count * sector_size;
 	}
 
 	return req;
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, uint32_t io_flags,
-		nvme_req_reset_sgl_fn_t reset_sgl_fn,
-		nvme_req_next_sge_fn_t next_sge_fn)
+_nvme_ns_cmd_rw(struct nvme_namespace *ns, const struct nvme_payload *payload,
+		uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
+		uint32_t io_flags)
 {
 	struct nvme_request *req;
 	struct nvme_command *cmd;
@@ -153,9 +145,6 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 		return NULL;
 	}
 
-	req->reset_sgl_fn = reset_sgl_fn;
-	req->next_sge_fn = next_sge_fn;
-
 	/*
 	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
 	 * If this controller defines a stripe boundary and this I/O spans a stripe
@@ -166,12 +155,10 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
 
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
-						  reset_sgl_fn, next_sge_fn);
+						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1);
 	} else if (lba_count > sectors_per_max_io) {
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  io_flags, req, sectors_per_max_io, 0,
-						  reset_sgl_fn, next_sge_fn);
+						  io_flags, req, sectors_per_max_io, 0);
 	} else {
 		cmd = &req->cmd;
 		cmd->opc = opc;
@@ -188,14 +175,17 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 }
 
 int
-nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
+nvme_ns_cmd_read(struct nvme_namespace *ns, void *buffer, uint64_t lba,
 		 uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
 		 uint32_t io_flags)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
-			      NULL, NULL);
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -211,9 +201,13 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 		  nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_SGL;
+	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
+	payload.u.sgl.next_sge_fn = next_sge_fn;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
-			      reset_sgl_fn, next_sge_fn);
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -223,14 +217,17 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 }
 
 int
-nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
+nvme_ns_cmd_write(struct nvme_namespace *ns, void *buffer, uint64_t lba,
 		  uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
 		  uint32_t io_flags)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
-			      NULL, NULL);
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -246,9 +243,13 @@ nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 		   nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_SGL;
+	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
+	payload.u.sgl.next_sge_fn = next_sge_fn;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
-			      reset_sgl_fn, next_sge_fn);
+	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -268,9 +269,9 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
 		return EINVAL;
 	}
 
-	req = nvme_allocate_request(payload,
-				    num_ranges * sizeof(struct nvme_dsm_range),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   num_ranges * sizeof(struct nvme_dsm_range),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -294,7 +295,7 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
+	req = nvme_allocate_request_null(cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -319,9 +320,9 @@ nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_reservation_register_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_reservation_register_data),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -353,7 +354,8 @@ nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload, sizeof(struct nvme_reservation_key_data), cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload, sizeof(struct nvme_reservation_key_data), cb_fn,
+					   cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}
@@ -385,9 +387,9 @@ nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
 	struct nvme_request *req;
 	struct nvme_command *cmd;
 
-	req = nvme_allocate_request(payload,
-				    sizeof(struct nvme_reservation_acquire_data),
-				    cb_fn, cb_arg);
+	req = nvme_allocate_request_contig(payload,
+					   sizeof(struct nvme_reservation_acquire_data),
+					   cb_fn, cb_arg);
 	if (req == NULL) {
 		return ENOMEM;
 	}


@@ -685,16 +685,17 @@ _nvme_qpair_build_sgl_request(struct nvme_qpair *qpair, struct nvme_request *req
 	 */
 	parent = req->parent ? req->parent : req;
 
-	nvme_assert(req->reset_sgl_fn != NULL, ("sgl reset callback required\n"));
-	req->reset_sgl_fn(parent->cb_arg, req->sgl_offset);
+	nvme_assert(req->payload.type == NVME_PAYLOAD_TYPE_SGL, ("sgl payload type required\n"));
+	nvme_assert(req->payload.u.sgl.reset_sgl_fn != NULL, ("sgl reset callback required\n"));
+	req->payload.u.sgl.reset_sgl_fn(parent->cb_arg, req->payload_offset);
 
 	remaining_transfer_len = req->payload_size;
 	total_nseg = 0;
 	last_nseg = 0;
 
 	while (remaining_transfer_len > 0) {
-		nvme_assert(req->next_sge_fn != NULL, ("sgl callback required\n"));
-		rc = req->next_sge_fn(parent->cb_arg, &phys_addr, &length);
+		nvme_assert(req->payload.u.sgl.next_sge_fn != NULL, ("sgl callback required\n"));
+		rc = req->payload.u.sgl.next_sge_fn(parent->cb_arg, &phys_addr, &length);
 		if (rc)
 			return -1;
@@ -803,12 +804,15 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 	tr->req = req;
 	req->cmd.cid = tr->cid;
 
-	if (req->u.payload) {
+	if (req->payload_size == 0) {
+		/* Null payload - leave PRP fields zeroed */
+	} else if (req->payload.type == NVME_PAYLOAD_TYPE_CONTIG) {
 		/*
 		 * Build PRP list describing payload buffer.
 		 */
-		phys_addr = nvme_vtophys(req->u.payload);
+		void *payload = req->payload.u.contig + req->payload_offset;
+
+		phys_addr = nvme_vtophys(payload);
 		if (phys_addr == NVME_VTOPHYS_ERROR) {
 			_nvme_fail_request_bad_vtophys(qpair, tr);
 			return;
@@ -823,13 +827,13 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 		tr->req->cmd.psdt = NVME_PSDT_PRP;
 		tr->req->cmd.dptr.prp.prp1 = phys_addr;
 		if (nseg == 2) {
-			seg_addr = req->u.payload + PAGE_SIZE - unaligned;
+			seg_addr = payload + PAGE_SIZE - unaligned;
 			tr->req->cmd.dptr.prp.prp2 = nvme_vtophys(seg_addr);
 		} else if (nseg > 2) {
 			cur_nseg = 1;
 			tr->req->cmd.dptr.prp.prp2 = (uint64_t)tr->prp_bus_addr;
 			while (cur_nseg < nseg) {
-				seg_addr = req->u.payload + cur_nseg * PAGE_SIZE - unaligned;
+				seg_addr = payload + cur_nseg * PAGE_SIZE - unaligned;
 				phys_addr = nvme_vtophys(seg_addr);
 				if (phys_addr == NVME_VTOPHYS_ERROR) {
 					_nvme_fail_request_bad_vtophys(qpair, tr);
@@ -839,12 +843,16 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 				cur_nseg++;
 			}
 		}
-	} else if (req->u.payload == NULL && req->payload_size != 0) {
+	} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL) {
 		rc = _nvme_qpair_build_sgl_request(qpair, req, tr);
 		if (rc < 0) {
 			_nvme_fail_request_bad_vtophys(qpair, tr);
 			return;
 		}
+	} else {
+		nvme_assert(0, ("invalid NVMe payload type %d\n", req->payload.type));
+		_nvme_fail_request_bad_vtophys(qpair, tr);
+		return;
 	}
 
 	nvme_qpair_submit_tracker(qpair, tr);


@@ -148,8 +148,8 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = NULL;
 
 	nvme_alloc_request(&req);
@@ -157,21 +157,35 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	if (req != NULL) {
 		memset(req, 0, offsetof(struct nvme_request, children));
 
-		if (payload == NULL || payload_size == 0) {
-			req->u.payload = NULL;
-			req->payload_size = 0;
-		} else {
-			req->u.payload = payload;
-			req->payload_size = payload_size;
-		}
+		req->payload = *payload;
+		req->payload_size = payload_size;
 
 		req->cb_fn = cb_fn;
 		req->cb_arg = cb_arg;
 		req->timeout = true;
 	}
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 static void
 test_nvme_ctrlr_fail(void)
 {


@@ -177,20 +177,15 @@ static void verify_intel_get_log_page_directory(struct nvme_request *req)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = &g_req;
 
 	memset(req, 0, sizeof(*req));
 
-	if (payload == NULL || payload_size == 0) {
-		req->u.payload = NULL;
-		req->payload_size = 0;
-	} else {
-		req->u.payload = payload;
-		req->payload_size = payload_size;
-	}
+	req->payload = *payload;
+	req->payload_size = payload_size;
 
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
@@ -199,6 +194,23 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
 			     struct nvme_request *req)


@@ -56,8 +56,8 @@ uint64_t nvme_vtophys(void *buf)
 }
 
 struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size,
-		      nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+		      void *cb_arg)
 {
 	struct nvme_request *req = NULL;
@@ -79,20 +79,29 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
 	req->cb_fn = cb_fn;
 	req->cb_arg = cb_arg;
 	req->timeout = true;
-	nvme_assert((payload == NULL && payload_size == 0) ||
-		    (payload != NULL && payload_size != 0),
-		    ("Invalid argument combination of payload and payload_size\n"));
-	if (payload == NULL || payload_size == 0) {
-		req->u.payload = NULL;
-		req->payload_size = 0;
-	} else {
-		req->u.payload = payload;
-		req->payload_size = payload_size;
-	}
+	req->payload = *payload;
+	req->payload_size = payload_size;
 
 	return req;
 }
 
+struct nvme_request *
+nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_payload payload;
+
+	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
+	payload.u.contig = buffer;
+
+	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
+}
+
+struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
+}
+
 void
 nvme_free_request(struct nvme_request *req)
 {
@@ -208,7 +217,7 @@ test3(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(NULL, 0, expected_success_callback, NULL);
+	req = nvme_allocate_request_null(expected_success_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	CU_ASSERT(qpair.sq_tail == 0);
@@ -232,7 +241,7 @@ test4(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
+	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	/* Force vtophys to return a failure. This should
@@ -265,7 +274,7 @@ test_ctrlr_failed(void)
 
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 
-	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
+	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	/* Disable the queue and set the controller to failed.
@@ -311,14 +320,14 @@ static void test_nvme_qpair_fail(void)
 	tr_temp = nvme_malloc("nvme_tracker", sizeof(struct nvme_tracker),
 			      64, &phys_addr);
 	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	tr_temp->req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
 
 	LIST_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, list);
 	nvme_qpair_fail(&qpair);
 	CU_ASSERT_TRUE(LIST_EMPTY(&qpair.outstanding_tr));
 
-	req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 
 	STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
@@ -394,7 +403,7 @@ static void test_nvme_qpair_destroy(void)
 	tr_temp = nvme_malloc("nvme_tracker", sizeof(struct nvme_tracker),
 			      64, &phys_addr);
 	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	tr_temp->req = nvme_allocate_request(NULL, 0, expected_failure_callback, NULL);
+	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
 	tr_temp->req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;