nvme: split based on max number of sges

1) Rename _nvme_ns_cmd_split_sgl_request to
   _nvme_ns_cmd_split_request_prp.

2) Add _nvme_ns_cmd_split_request_sgl.  This
   function will be used with controllers that
   support SGL, to make sure the request does not
   span more SGEs than the controller (or its
   supporting software) supports.

While here, add a useful error message for both
the PRP and SGL cases when child_length is not
an even multiple of the LBA size.
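
As a rough illustration of point 2 (the numbers below are
hypothetical and not taken from this patch), capping each
child request at max_sges bounds the number of children:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical: a payload described by 64 discontiguous buffers on a
	 * controller whose transport reports max_sges = 16.  The splitter caps
	 * each child request at max_sges SGEs, so this I/O becomes 4 children. */
	uint16_t max_sges = 16;
	uint32_t payload_sges = 64;
	uint32_t children = (payload_sges + max_sges - 1) / max_sges;

	printf("children = %u\n", children);	/* prints 4 */
	return 0;
}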

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Ifd04aee3c571359009260fe46400ce1844511aa3

Reviewed-on: https://review.gerrithub.io/372359
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Jim Harris 2017-08-02 12:18:10 -07:00
parent 002660c4f0
commit c27e4a18b1


@@ -206,7 +206,7 @@ _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
 }
 
 static struct nvme_request *
-_nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
+_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			       struct spdk_nvme_qpair *qpair,
			       const struct nvme_payload *payload,
			       uint32_t payload_offset, uint32_t md_offset,
@@ -294,6 +294,8 @@ _nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
 		uint32_t child_lba_count;
 
 		if ((child_length % ns->extended_lba_size) != 0) {
+			SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
+				    child_length, ns->extended_lba_size);
 			return NULL;
 		}
 		child_lba_count = child_length / ns->extended_lba_size;
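
A quick sketch of when this new error path can fire (hypothetical
numbers, not part of the patch): with 512-byte data blocks plus 8
bytes of interleaved metadata, extended_lba_size is 520, so a split
boundary at a 4 KiB SGE does not land on an LBA boundary:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical format: 512-byte data blocks + 8 bytes interleaved metadata. */
	uint32_t extended_lba_size = 512 + 8;	/* 520 */
	uint32_t child_length = 4096;		/* a split at a 4 KiB SGE boundary */

	/* 4096 % 520 == 456, so the splitter takes the new SPDK_ERRLOG() path
	 * and returns NULL rather than issuing a misaligned child I/O. */
	assert(child_length % extended_lba_size != 0);
	return 0;
}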
@@ -324,6 +326,89 @@ _nvme_ns_cmd_split_sgl_request(struct spdk_nvme_ns *ns,
 	return req;
 }
 
+static struct nvme_request *
+_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
+			       struct spdk_nvme_qpair *qpair,
+			       const struct nvme_payload *payload,
+			       uint32_t payload_offset, uint32_t md_offset,
+			       uint64_t lba, uint32_t lba_count,
+			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
+			       uint32_t io_flags, struct nvme_request *req,
+			       uint16_t apptag_mask, uint16_t apptag)
+{
+	struct nvme_sgl_args *args;
+	uint64_t child_lba = lba;
+	uint32_t req_current_length = 0;
+	uint32_t child_length = 0;
+	uint32_t sge_length;
+	uint16_t max_sges, num_sges;
+	uintptr_t address;
+
+	args = &req->payload.u.sgl;
+	max_sges = ns->ctrlr->max_sges;
+
+	args->reset_sgl_fn(args->cb_arg, payload_offset);
+	num_sges = 0;
+
+	while (req_current_length < req->payload_size) {
+		args->next_sge_fn(args->cb_arg, (void **)&address, &sge_length);
+
+		if (req_current_length + sge_length > req->payload_size) {
+			sge_length = req->payload_size - req_current_length;
+		}
+
+		child_length += sge_length;
+		req_current_length += sge_length;
+		num_sges++;
+
+		if (num_sges < max_sges) {
+			continue;
+		}
+
+		/*
+		 * We need to create a split here.  Send what we have accumulated so far as a child
+		 * request.  Checking if the child equals the full payload allows us to *not*
+		 * create a child request when no splitting is required - in that case we will
+		 * fall-through and just create a single request with no children for the entire I/O.
+		 */
+		if (child_length != req->payload_size) {
+			struct nvme_request *child;
+			uint32_t child_lba_count;
+
+			if ((child_length % ns->extended_lba_size) != 0) {
+				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
+					    child_length, ns->extended_lba_size);
+				return NULL;
+			}
+			child_lba_count = child_length / ns->extended_lba_size;
+			/*
+			 * Note the last parameter is set to "false" - this tells the recursive
+			 * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
+			 * since we have already verified it here.
+			 */
+			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
+							child_lba, child_lba_count,
+							cb_fn, cb_arg, opc, io_flags,
+							apptag_mask, apptag, req, false);
+			if (child == NULL) {
+				return NULL;
+			}
+			payload_offset += child_length;
+			md_offset += child_lba_count * ns->md_size;
+			child_lba += child_lba_count;
+			child_length = 0;
+			num_sges = 0;
+		}
+	}
+
+	if (child_length == req->payload_size) {
+		/* No splitting was required, so setup the whole payload as one request. */
+		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
+	}
+
+	return req;
+}
+
 static struct nvme_request *
 _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
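
For context on the payload that _nvme_ns_cmd_split_request_sgl() walks:
the reset_sgl_fn/next_sge_fn callbacks are supplied by the caller, for
example through spdk_nvme_ns_cmd_readv().  The sketch below is a minimal,
hypothetical iovec-backed caller (helper names and error handling are
invented here, not part of the patch):

#include <sys/uio.h>
#include "spdk/nvme.h"

/* Hypothetical caller-side context driven through reset_sgl/next_sge. */
struct iov_ctx {
	struct iovec	*iov;
	int		iovcnt;
	int		idx;
	uint32_t	iov_offset;
};

static void
iov_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct iov_ctx *ctx = cb_arg;

	/* Seek to the iovec element containing byte 'offset' of the payload. */
	ctx->idx = 0;
	while (offset >= ctx->iov[ctx->idx].iov_len) {
		offset -= ctx->iov[ctx->idx++].iov_len;
	}
	ctx->iov_offset = offset;
}

static int
iov_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct iov_ctx *ctx = cb_arg;

	/* Hand back one SGE; the splitter accumulates these up to max_sges. */
	*address = (uint8_t *)ctx->iov[ctx->idx].iov_base + ctx->iov_offset;
	*length = (uint32_t)ctx->iov[ctx->idx].iov_len - ctx->iov_offset;
	ctx->iov_offset = 0;
	ctx->idx++;
	return 0;
}

/* Submit a read over the iovec list; if it spans more than the controller's
 * max_sges, _nvme_ns_cmd_split_request_sgl() splits it into child requests. */
static int
submit_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     uint64_t lba, uint32_t lba_count, struct iov_ctx *ctx,
	     spdk_nvme_cmd_cb io_done)
{
	return spdk_nvme_ns_cmd_readv(ns, qpair, lba, lba_count, io_done, ctx,
				      0, iov_reset_sgl, iov_next_sge);
}

Any splitting stays transparent to the caller: the children complete
individually, and the original cb_fn still fires once for the parent
request covering the whole I/O.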
@@ -377,10 +462,16 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
					      cb_fn,
					      cb_arg, opc,
					      io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag);
-	} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL && check_sgl &&
-		   !(ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED)) {
-		return _nvme_ns_cmd_split_sgl_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
-						      cb_fn, cb_arg, opc, io_flags, req, apptag_mask, apptag);
+	} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
+		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
+			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
+							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
+							      req, apptag_mask, apptag);
+		} else {
+			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
+							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
+							      req, apptag_mask, apptag);
+		}
 	}
 
 	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);