nvme: Add qpair option to batch command submissions

Avoid ringing the submission queue doorbell until the
call to spdk_nvme_qpair_process_completions().

Change-Id: I7b3cd952e5ec79109eaa1c3a50f6537d7aaea51a
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447239
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Author: Ben Walker, 2019-03-06 13:23:56 -07:00
Commit: 64a50b6177 (parent: 98c4101c51)
3 changed files with 31 additions and 2 deletions
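For reference, an application opts in per queue pair through the I/O qpair options. The sketch below is illustrative only (the helper name is ours and error handling is omitted); it uses the public spdk_nvme_ctrlr_get_default_io_qpair_opts(), spdk_nvme_ctrlr_alloc_io_qpair() and spdk_nvme_qpair_process_completions() APIs:

#include "spdk/nvme.h"

/* Hypothetical helper: allocate an I/O qpair with deferred doorbell writes.
 * Error handling omitted for brevity. */
static struct spdk_nvme_qpair *
alloc_batched_qpair(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.delay_pcie_doorbell = true;	/* defer doorbell writes until completions are polled */

	/* I/O submitted on this qpair is queued without ringing the doorbell;
	 * the doorbell is written inside spdk_nvme_qpair_process_completions(). */
	return spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
}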


@@ -728,6 +728,17 @@ struct spdk_nvme_io_qpair_opts {
 	 * compatibility requirements, or driver-assisted striping.
 	 */
 	uint32_t io_queue_requests;
+
+	/**
+	 * When submitting I/O via spdk_nvme_ns_read/write and similar functions,
+	 * don't immediately write the submission queue doorbell. Instead, write
+	 * to the doorbell as necessary inside spdk_nvme_qpair_process_completions().
+	 *
+	 * This results in better batching of I/O submission and consequently fewer
+	 * MMIO writes to the doorbell, which may increase performance.
+	 *
+	 * This only applies to local PCIe devices. */
+	bool delay_pcie_doorbell;
 };
 
 /**

View File

@@ -213,6 +213,10 @@ spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
 		opts->io_queue_requests = ctrlr->opts.io_queue_requests;
 	}
 
+	if (FIELD_OK(delay_pcie_doorbell)) {
+		opts->delay_pcie_doorbell = false;
+	}
+
 #undef FIELD_OK
 }
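The FIELD_OK() guard above lets the new field be appended to struct spdk_nvme_io_qpair_opts without breaking applications compiled against an older, smaller version of the struct: a field is only defaulted if it fits entirely within the size the caller passed in. A minimal, self-contained sketch of that pattern (the io_opts struct and get_default_opts() names here are hypothetical, for illustration only):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical options struct; delay_pcie_doorbell is appended at the end. */
struct io_opts {
	uint32_t io_queue_size;
	uint32_t io_queue_requests;
	bool delay_pcie_doorbell;
};

static void
get_default_opts(struct io_opts *opts, size_t opts_size)
{
/* Only touch a field that lies entirely within the caller's structure,
 * i.e. the caller was built against a version that already has it. */
#define FIELD_OK(field) \
	(offsetof(struct io_opts, field) + sizeof(opts->field) <= opts_size)

	if (FIELD_OK(delay_pcie_doorbell)) {
		opts->delay_pcie_doorbell = false;
	}
#undef FIELD_OK
}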


@@ -172,6 +172,7 @@ struct nvme_pcie_qpair {
 	uint16_t max_completions_cap;
 
+	uint16_t last_sq_tail;
 	uint16_t sq_tail;
 	uint16_t cq_head;
 	uint16_t sq_head;
@@ -180,6 +181,8 @@ struct nvme_pcie_qpair {
 	bool is_enabled;
 
+	bool delay_pcie_doorbell;
+
 	/*
 	 * Base qpair structure.
 	 * This is located after the hot data in this structure so that the important parts of
@@ -674,6 +677,7 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
 	}
 
 	pqpair->num_entries = NVME_ADMIN_ENTRIES;
+	pqpair->delay_pcie_doorbell = false;
 
 	ctrlr->adminq = &pqpair->qpair;
@@ -941,7 +945,7 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
 
-	pqpair->sq_tail = pqpair->cq_head = 0;
+	pqpair->last_sq_tail = pqpair->sq_tail = pqpair->cq_head = 0;
 
 	/*
 	 * First time through the completion queue, HW will set phase
@@ -1212,7 +1216,9 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
 		SPDK_ERRLOG("sq_tail is passing sq_head!\n");
 	}
 
-	nvme_pcie_qpair_ring_sq_doorbell(qpair);
+	if (!pqpair->delay_pcie_doorbell) {
+		nvme_pcie_qpair_ring_sq_doorbell(qpair);
+	}
 }
 
 static void
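For context on the cost being amortized: ringing the submission queue doorbell ultimately comes down to an MMIO write of the new tail index to the queue's doorbell register. A deliberately simplified sketch of the idea follows; it is not the actual nvme_pcie_qpair_ring_sq_doorbell(), which contains additional bookkeeping, and the function name here is hypothetical:

#include "spdk/stdinc.h"
#include "spdk/barrier.h"
#include "spdk/mmio.h"

/* Simplified illustration only: make the queued submission entries visible
 * to the device, then write the new tail index to the SQ doorbell register. */
static void
ring_sq_doorbell_sketch(volatile uint32_t *sq_tdbl, uint16_t sq_tail)
{
	spdk_wmb();
	spdk_mmio_write_4(sq_tdbl, sq_tail);
}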
@@ -1588,6 +1594,7 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 	}
 
 	pqpair->num_entries = opts->io_queue_size;
+	pqpair->delay_pcie_doorbell = opts->delay_pcie_doorbell;
 
 	qpair = &pqpair->qpair;
@@ -2125,6 +2132,13 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		}
 	}
 
+	if (pqpair->delay_pcie_doorbell) {
+		if (pqpair->last_sq_tail != pqpair->sq_tail) {
+			nvme_pcie_qpair_ring_sq_doorbell(qpair);
+			pqpair->last_sq_tail = pqpair->sq_tail;
+		}
+	}
+
 	if (spdk_unlikely(ctrlr->timeout_enabled)) {
 		/*
 		 * User registered for timeout callback