nvme: Use sgls, if available, even for contiguous memory
The hardware SGL format can describe a large contiguous
buffer with a single element, so it is more efficient
than a PRP list even for a single memory segment. Use
the SGL format whenever the controller supports it.
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/475542 (master)
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
(cherry picked from commit bed4cdf6c7)
Change-Id: I9c62582829f0d64dcd1babdbc48930ddb4d9e626
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478354
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
parent a6302a4cea
commit db4208d3a1
lib/nvme/nvme_pcie.c
@@ -1815,6 +1815,78 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
 	return 0;
 }
 
+/**
+ * Build an SGL describing a physically contiguous payload buffer.
+ *
+ * This is more efficient than using PRP because large buffers can be
+ * described this way.
+ */
+static int
+nvme_pcie_qpair_build_contig_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
+					    struct nvme_tracker *tr)
+{
+	void *virt_addr;
+	uint64_t phys_addr, mapping_length;
+	uint32_t length;
+	struct spdk_nvme_sgl_descriptor *sgl;
+	uint32_t nseg = 0;
+
+	assert(req->payload_size != 0);
+	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+
+	sgl = tr->u.sgl;
+	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
+	req->cmd.dptr.sgl1.unkeyed.subtype = 0;
+
+	length = req->payload_size;
+	virt_addr = req->payload.contig_or_cb_arg + req->payload_offset;
+
+	while (length > 0) {
+		if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
+			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
+			return -EFAULT;
+		}
+
+		phys_addr = spdk_vtophys(virt_addr, &mapping_length);
+		if (phys_addr == SPDK_VTOPHYS_ERROR) {
+			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
+			return -EFAULT;
+		}
+
+		mapping_length = spdk_min(length, mapping_length);
+
+		length -= mapping_length;
+		virt_addr += mapping_length;
+
+		sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
+		sgl->unkeyed.length = mapping_length;
+		sgl->address = phys_addr;
+		sgl->unkeyed.subtype = 0;
+
+		sgl++;
+		nseg++;
+	}
+
+	if (nseg == 1) {
+		/*
+		 * The whole transfer can be described by a single SGL descriptor.
+		 * Use the special case described by the spec where SGL1's type is Data Block.
+		 * This means the SGL in the tracker is not used at all, so copy the first (and only)
+		 * SGL element into SGL1.
+		 */
+		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
+		req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
+		req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
+	} else {
+		/* For now we can only support 1 SGL segment in NVMe controller */
+		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+		req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
+		req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
+	}
+
+	return 0;
+}
+
 /**
  * Build SGL list describing scattered payload buffer.
  */
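To spell out the two branches above (a worked illustration, not part of the patch): NVMe SGL descriptors are 16 bytes each, so when a payload spans two physical mappings, SGL1 becomes a Last Segment descriptor whose length is the byte size of the tracker's descriptor list, while a single-mapping payload is described entirely by SGL1 itself with no extra descriptor fetch by the controller.

	#include <assert.h>
	#include <stdint.h>

	/* Sketch of the nseg > 1 case: a 100-byte payload over 60- and 40-byte
	 * mappings yields two data-block descriptors, so SGL1 (Last Segment)
	 * carries the byte size of that list. */
	int main(void)
	{
		uint32_t nseg = 2;
		uint32_t sgl_descriptor_size = 16;	/* per the NVMe spec */

		assert(nseg * sgl_descriptor_size == 32);	/* SGL1.unkeyed.length */
		return 0;
	}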
@@ -2001,7 +2073,11 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques
 		/* Null payload - leave PRP fields untouched */
 		rc = 0;
 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
-		rc = nvme_pcie_qpair_build_contig_request(qpair, req, tr);
+		if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
+			rc = nvme_pcie_qpair_build_contig_hw_sgl_request(qpair, req, tr);
+		} else {
+			rc = nvme_pcie_qpair_build_contig_request(qpair, req, tr);
+		}
 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
 		if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
 			rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, req, tr);
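Usage-wise, a hedged sketch (not from the patch; ns, qpair, and io_complete are placeholders for an already-connected namespace, queue pair, and completion callback): a read into a single spdk_dma_zmalloc() buffer now takes the hardware-SGL path on controllers that advertise SGL support, and falls back to PRP otherwise.

	/* Hypothetical caller; ns, qpair, io_complete are placeholders. */
	void *buf = spdk_dma_zmalloc(1024 * 1024, 0x1000, NULL);

	/* With SPDK_NVME_CTRLR_SGL_SUPPORTED set on the controller, this request
	 * is built by nvme_pcie_qpair_build_contig_hw_sgl_request(); otherwise it
	 * still goes through the PRP-based nvme_pcie_qpair_build_contig_request(). */
	spdk_nvme_ns_cmd_read(ns, qpair, buf, 0 /* starting LBA */,
			      256 /* LBA count */, io_complete, NULL, 0 /* io_flags */);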
test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -35,6 +35,7 @@
 
 #include "spdk_cunit.h"
 
+#define UNIT_TEST_NO_VTOPHYS
 #include "common/lib/test_env.c"
 
 #include "nvme/nvme_pcie.c"
@@ -232,24 +233,6 @@ nvme_get_quirks(const struct spdk_pci_id *id)
 	abort();
 }
 
-bool
-nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
-{
-	abort();
-}
-
-void
-spdk_nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
-{
-	abort();
-}
-
-void
-spdk_nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
-{
-	abort();
-}
-
 int
 nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
 {
@@ -570,98 +553,31 @@ test_hw_sgl_req(void)
 	nvme_free_request(req);
 }
 
-static void test_nvme_qpair_abort_reqs(void)
-{
-	struct spdk_nvme_qpair qpair = {};
-	struct nvme_request *req = NULL;
-	struct spdk_nvme_ctrlr ctrlr = {};
-	struct nvme_tracker *tr_temp;
-
-	prepare_submit_request_test(&qpair, &ctrlr);
-
-	tr_temp = TAILQ_FIRST(&qpair.free_tr);
-	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
-	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
-	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
-	tr_temp->req->cmd.cid = tr_temp->cid;
-
-	TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
-	nvme_qpair_abort_reqs(&qpair, true);
-	CU_ASSERT_TRUE(TAILQ_EMPTY(&qpair.outstanding_tr));
-
-	req = nvme_allocate_request_null(expected_failure_callback, NULL);
-	SPDK_CU_ASSERT_FATAL(req != NULL);
-
-	STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
-	nvme_qpair_abort_reqs(&qpair, true);
-	CU_ASSERT_TRUE(STAILQ_EMPTY(&qpair.queued_req));
-
-	cleanup_submit_request_test(&qpair);
-}
-
-static void
-test_nvme_qpair_process_completions_limit(void)
-{
-	struct spdk_nvme_qpair qpair = {};
-	struct spdk_nvme_ctrlr ctrlr = {};
-
-	prepare_submit_request_test(&qpair, &ctrlr);
-	qpair.is_enabled = true;
-
-	/* Insert 4 entries into the completion queue */
-	CU_ASSERT(qpair.cq_head == 0);
-	ut_insert_cq_entry(&qpair, 0);
-	ut_insert_cq_entry(&qpair, 1);
-	ut_insert_cq_entry(&qpair, 2);
-	ut_insert_cq_entry(&qpair, 3);
-
-	/* This should only process 2 completions, and 2 should be left in the queue */
-	spdk_nvme_qpair_process_completions(&qpair, 2);
-	CU_ASSERT(qpair.cq_head == 2);
-
-	/* This should only process 1 completion, and 1 should be left in the queue */
-	spdk_nvme_qpair_process_completions(&qpair, 1);
-	CU_ASSERT(qpair.cq_head == 3);
-
-	/* This should process the remaining completion */
-	spdk_nvme_qpair_process_completions(&qpair, 5);
-	CU_ASSERT(qpair.cq_head == 4);
-
-	cleanup_submit_request_test(&qpair);
-}
-
-static void test_nvme_qpair_destroy(void)
-{
-	struct spdk_nvme_qpair qpair = {};
-	struct spdk_nvme_ctrlr ctrlr = {};
-	struct nvme_tracker *tr_temp;
-
-	memset(&ctrlr, 0, sizeof(ctrlr));
-	TAILQ_INIT(&ctrlr.free_io_qpairs);
-	TAILQ_INIT(&ctrlr.active_io_qpairs);
-	TAILQ_INIT(&ctrlr.active_procs);
-
-	nvme_qpair_init(&qpair, 1, 128, &ctrlr);
-	nvme_qpair_destroy(&qpair);
-
-
-	nvme_qpair_init(&qpair, 0, 128, &ctrlr);
-	tr_temp = TAILQ_FIRST(&qpair.free_tr);
-	SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
-	TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
-	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
-	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
-
-	tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
-	tr_temp->req->cmd.cid = tr_temp->cid;
-	TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
-
-	nvme_qpair_destroy(&qpair);
-	CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding_tr));
-}
-#endif
-
+static uint64_t g_vtophys_size = 0;
+
+DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+	if (size) {
+		*size = g_vtophys_size;
+	}
+
+	HANDLE_RETURN_MOCK(spdk_vtophys);
+
+	return (uintptr_t)buf;
+}
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
+	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
+DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
+DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cpl *cpl));
+
 static void
 prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
 {
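A note on the mock above (an illustration, not part of the patch): g_vtophys_size controls the mapping length the stub reports back, while MOCK_SET() pins its return value, so the tests below can make one virtually contiguous buffer look like several physical mappings. For example:

	uint64_t len = 0;
	uint64_t phys;

	/* Arm the mock: every translation reports a 60-byte window at 0xDEADBEEF. */
	g_vtophys_size = 60;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	phys = spdk_vtophys((void *)0x1000, &len);
	/* phys == 0xDEADBEEF and len == 60, regardless of the input address. */

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;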
@@ -799,16 +715,75 @@ test_prp_list_append(void)
 				       (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EINVAL);
 }
 
-static void test_shadow_doorbell_update(void)
+static void
+test_build_contig_hw_sgl_request(void)
 {
-	bool ret;
+	struct spdk_nvme_qpair qpair = {};
+	struct nvme_request req = {};
+	struct nvme_tracker tr = {};
+	int rc;
 
-	/* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
-	ret = nvme_pcie_qpair_need_event(10, 15, 14);
-	CU_ASSERT(ret == false);
+	/* Test 1: Payload covered by a single mapping */
+	req.payload_size = 100;
+	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+	g_vtophys_size = 100;
+	MOCK_SET(spdk_vtophys, 0xDEADBEEF);
 
-	ret = nvme_pcie_qpair_need_event(14, 15, 14);
-	CU_ASSERT(ret == true);
+	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+	MOCK_CLEAR(spdk_vtophys);
+	g_vtophys_size = 0;
+	memset(&qpair, 0, sizeof(qpair));
+	memset(&req, 0, sizeof(req));
+	memset(&tr, 0, sizeof(tr));
+
+	/* Test 2: Payload covered by a single mapping, but request is at an offset */
+	req.payload_size = 100;
+	req.payload_offset = 50;
+	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+	g_vtophys_size = 1000;
+	MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+	MOCK_CLEAR(spdk_vtophys);
+	g_vtophys_size = 0;
+	memset(&qpair, 0, sizeof(qpair));
+	memset(&req, 0, sizeof(req));
+	memset(&tr, 0, sizeof(tr));
+
+	/* Test 3: Payload spans two mappings */
+	req.payload_size = 100;
+	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+	g_vtophys_size = 60;
+	tr.prp_sgl_bus_addr = 0xFF0FF;
+	MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
+	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
+	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
+	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
+	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);
+
+	MOCK_CLEAR(spdk_vtophys);
+	g_vtophys_size = 0;
+	memset(&qpair, 0, sizeof(qpair));
+	memset(&req, 0, sizeof(req));
+	memset(&tr, 0, sizeof(tr));
 }
 
 int main(int argc, char **argv)
@@ -826,9 +801,8 @@ int main(int argc, char **argv)
 		return CU_get_error();
 	}
 
-	if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL
-	    || CU_add_test(suite, "shadow_doorbell_update",
-			   test_shadow_doorbell_update) == NULL) {
+	if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL ||
+	    CU_add_test(suite, "build_contig_hw_sgl_request", test_build_contig_hw_sgl_request) == NULL) {
 		CU_cleanup_registry();
 		return CU_get_error();
 	}