nvme: add max completions limit to I/O polling
nvme_ctrlr_process_io_completions() now takes a second parameter,
max_completions, which lets the user limit the number of I/Os completed on
each poll. If many I/Os are waiting to be completed,
nvme_ctrlr_process_io_completions() could otherwise run for a long time
before returning control to the user, so the max_completions parameter gives
the user more control over latency.

Change-Id: I3173059d94ec1cc5dbb636fc0ffd3dc09f3bfe4b
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
parent 181de7bf79 · commit 668847e150
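For illustration, a minimal sketch of how a caller might use the new parameter to bound the work done per poll. The event loop, the `app_keep_running()`, `submit_more_io()`, and `do_other_work()` helpers, and the limit of 32 are hypothetical and not part of this change; only the nvme_ctrlr_process_io_completions() prototype comes from the commit itself.

#include <stdint.h>
#include <stdbool.h>

struct nvme_controller;

/* Prototype from the library header, as changed by this commit. */
void nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);

/* Hypothetical application helpers, for illustration only. */
bool app_keep_running(void);
void submit_more_io(struct nvme_controller *ctrlr);
void do_other_work(void);

/* Cap each poll at 32 completions so the loop regains control quickly even
 * when many I/Os are ready; pass 0 instead to drain everything (old behavior). */
static void
app_poll_loop(struct nvme_controller *ctrlr)
{
    while (app_keep_running()) {
        submit_more_io(ctrlr);
        nvme_ctrlr_process_io_completions(ctrlr, 32);
        do_other_work();
    }
}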
@@ -396,7 +396,7 @@ check_io(struct ns_worker_ctx *ns_ctx)
     } else
 #endif
     {
-        nvme_ctrlr_process_io_completions(ns_ctx->entry->u.nvme.ctrlr);
+        nvme_ctrlr_process_io_completions(ns_ctx->entry->u.nvme.ctrlr, 0);
     }
 }

@@ -155,12 +155,15 @@ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
  * This will only process completions for I/O that were submitted on the same thread
  * that this function is called from. This call is also non-blocking, i.e. it only
  * processes completions that are ready at the time of this function call. It does not
- * wait for outstanding commands to finish
+ * wait for outstanding commands to finish.
+ *
+ * \param max_completions Limit the number of completions to be processed in one call, or 0
+ * for unlimited.
  *
  * This function is thread safe and can be called at any point after nvme_attach().
  *
  */
-void nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr);
+void nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);

 /**
  * \brief Send the given admin command to the NVMe controller.
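As a quick illustration of the convention documented above (not part of the diff): passing 0 keeps the old drain-everything behavior, while a positive value caps the work done per call. The `poll_examples()` wrapper and the limit of 8 are hypothetical.

#include <stdint.h>

struct nvme_controller;

/* Prototype from the library header, as changed by this commit. */
void nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);

/* Hypothetical helper contrasting the two calling conventions. */
static void
poll_examples(struct nvme_controller *ctrlr)
{
    /* Drain every completion that is ready right now (0 = unlimited). */
    nvme_ctrlr_process_io_completions(ctrlr, 0);

    /* Process at most 8 completions, then return control to the caller. */
    nvme_ctrlr_process_io_completions(ctrlr, 8);
}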
@@ -333,7 +333,7 @@ nvme_ctrlr_identify(struct nvme_controller *ctrlr)
     nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
                                        nvme_completion_poll_cb, &status);
     while (status.done == false) {
-        nvme_qpair_process_completions(&ctrlr->adminq);
+        nvme_qpair_process_completions(&ctrlr->adminq, 0);
     }
     if (nvme_completion_is_error(&status.cpl)) {
         nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
@@ -369,7 +369,7 @@ nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
     nvme_ctrlr_cmd_set_num_queues(ctrlr, max_io_queues,
                                   nvme_completion_poll_cb, &status);
     while (status.done == false) {
-        nvme_qpair_process_completions(&ctrlr->adminq);
+        nvme_qpair_process_completions(&ctrlr->adminq, 0);
     }
     if (nvme_completion_is_error(&status.cpl)) {
         nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
@@ -412,7 +412,7 @@ nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
         nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
                                     nvme_completion_poll_cb, &status);
         while (status.done == false) {
-            nvme_qpair_process_completions(&ctrlr->adminq);
+            nvme_qpair_process_completions(&ctrlr->adminq, 0);
         }
         if (nvme_completion_is_error(&status.cpl)) {
             nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
@@ -423,7 +423,7 @@ nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
         nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
                                     nvme_completion_poll_cb, &status);
         while (status.done == false) {
-            nvme_qpair_process_completions(&ctrlr->adminq);
+            nvme_qpair_process_completions(&ctrlr->adminq, 0);
         }
         if (nvme_completion_is_error(&status.cpl)) {
             nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
@@ -566,7 +566,7 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
     nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, nvme_completion_poll_cb, &status);

     while (status.done == false) {
-        nvme_qpair_process_completions(&ctrlr->adminq);
+        nvme_qpair_process_completions(&ctrlr->adminq, 0);
     }
     if (nvme_completion_is_error(&status.cpl)) {
         nvme_printf(ctrlr, "nvme_ctrlr_cmd_set_async_event_config failed!\n");
@@ -728,17 +728,17 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
 }

 void
-nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr)
+nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions)
 {
     nvme_assert(nvme_thread_ioq_index >= 0, ("no ioq_index assigned for thread\n"));
-    nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index]);
+    nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index], max_completions);
 }

 void
 nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr)
 {
     nvme_mutex_lock(&ctrlr->ctrlr_lock);
-    nvme_qpair_process_completions(&ctrlr->adminq);
+    nvme_qpair_process_completions(&ctrlr->adminq, 0);
     nvme_mutex_unlock(&ctrlr->ctrlr_lock);
 }

@@ -414,7 +414,7 @@ void nvme_qpair_enable(struct nvme_qpair *qpair);
 void nvme_qpair_disable(struct nvme_qpair *qpair);
 void nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
                                struct nvme_tracker *tr);
-void nvme_qpair_process_completions(struct nvme_qpair *qpair);
+void nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions);
 void nvme_qpair_submit_request(struct nvme_qpair *qpair,
                                struct nvme_request *req);
 void nvme_qpair_reset(struct nvme_qpair *qpair);
@@ -106,7 +106,7 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
     nvme_ctrlr_cmd_identify_namespace(ctrlr, id, nsdata,
                                       nvme_completion_poll_cb, &status);
     while (status.done == false) {
-        nvme_qpair_process_completions(&ctrlr->adminq);
+        nvme_qpair_process_completions(&ctrlr->adminq, 0);
     }
     if (nvme_completion_is_error(&status.cpl)) {
         nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
@@ -443,7 +443,7 @@ nvme_qpair_check_enabled(struct nvme_qpair *qpair)
  * \sa nvme_cb_fn_t
  */
 void
-nvme_qpair_process_completions(struct nvme_qpair *qpair)
+nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions)
 {
     struct nvme_tracker *tr;
     struct nvme_completion *cpl;
@@ -481,6 +481,10 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
         }

         _nvme_mmio_write_4(qpair->cq_hdbl, qpair->cq_head);
+
+        if (max_completions > 0 && --max_completions == 0) {
+            break;
+        }
     }
 }

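A note on the guard added above: the counter is decremented only when max_completions is non-zero, so passing 0 never takes the early break and the loop still drains every ready completion, matching the documented 0-for-unlimited behavior. Below is a self-contained model of just that cap logic (illustrative only, not part of the commit; `capped_drain` and the test values are made up).

#include <assert.h>
#include <stdint.h>

/* Returns how many of `ready` pending completions the capped loop would
 * process for a given max_completions value. */
static uint32_t
capped_drain(uint32_t ready, uint32_t max_completions)
{
    uint32_t processed = 0;

    while (processed < ready) {
        processed++;
        if (max_completions > 0 && --max_completions == 0) {
            break;
        }
    }
    return processed;
}

int
main(void)
{
    assert(capped_drain(4, 0) == 4); /* 0 means unlimited */
    assert(capped_drain(4, 2) == 2); /* capped at 2 */
    assert(capped_drain(4, 5) == 4); /* cap larger than the backlog */
    return 0;
}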
@@ -63,7 +63,7 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
 }

 void
-nvme_qpair_process_completions(struct nvme_qpair *qpair)
+nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions)
 {
 }

@@ -162,6 +162,29 @@ cleanup_submit_request_test(struct nvme_qpair *qpair)
     nvme_qpair_destroy(qpair);
 }

+static void
+ut_insert_cq_entry(struct nvme_qpair *qpair, uint32_t slot)
+{
+    struct nvme_request *req;
+    struct nvme_tracker *tr;
+    struct nvme_completion *cpl;
+
+    nvme_alloc_request(&req);
+    memset(req, 0, sizeof(*req));
+    req->cmd.cid = slot;
+
+    tr = LIST_FIRST(&qpair->free_tr);
+    LIST_REMOVE(tr, list); /* remove tr from free_tr */
+    LIST_INSERT_HEAD(&qpair->outstanding_tr, tr, list);
+    tr->cid = slot;
+    tr->req = req;
+    qpair->act_tr[tr->cid] = tr;
+
+    cpl = &qpair->cpl[slot];
+    cpl->status.p = qpair->phase;
+    cpl->cid = slot;
+}
+
 static void
 expected_success_callback(void *arg, const struct nvme_completion *cpl)
 {
@@ -313,7 +336,39 @@ static void test_nvme_qpair_process_completions(void)
     qpair.is_enabled = false;
     qpair.ctrlr->is_resetting = true;

-    nvme_qpair_process_completions(&qpair);
+    nvme_qpair_process_completions(&qpair, 0);
     cleanup_submit_request_test(&qpair);
 }

+static void
+test_nvme_qpair_process_completions_limit(void)
+{
+    struct nvme_qpair qpair = {};
+    struct nvme_controller ctrlr = {};
+    struct nvme_registers regs = {};
+
+    prepare_submit_request_test(&qpair, &ctrlr, &regs);
+    qpair.is_enabled = true;
+
+    /* Insert 4 entries into the completion queue */
+    CU_ASSERT(qpair.cq_head == 0);
+    ut_insert_cq_entry(&qpair, 0);
+    ut_insert_cq_entry(&qpair, 1);
+    ut_insert_cq_entry(&qpair, 2);
+    ut_insert_cq_entry(&qpair, 3);
+
+    /* This should only process 2 completions, and 2 should be left in the queue */
+    nvme_qpair_process_completions(&qpair, 2);
+    CU_ASSERT(qpair.cq_head == 2);
+
+    /* This should only process 1 completion, and 1 should be left in the queue */
+    nvme_qpair_process_completions(&qpair, 1);
+    CU_ASSERT(qpair.cq_head == 3);
+
+    /* This should process the remaining completion */
+    nvme_qpair_process_completions(&qpair, 5);
+    CU_ASSERT(qpair.cq_head == 4);
+
+    cleanup_submit_request_test(&qpair);
+}
+
@@ -444,6 +499,8 @@ int main(int argc, char **argv)
         || CU_add_test(suite, "struct_packing", struct_packing) == NULL
         || CU_add_test(suite, "nvme_qpair_fail", test_nvme_qpair_fail) == NULL
         || CU_add_test(suite, "nvme_qpair_process_completions", test_nvme_qpair_process_completions) == NULL
+        || CU_add_test(suite, "nvme_qpair_process_completions_limit",
+                       test_nvme_qpair_process_completions_limit) == NULL
         || CU_add_test(suite, "nvme_qpair_destroy", test_nvme_qpair_destroy) == NULL
         || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
         || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL