ioat/perf: use new spdk_ioat_flush interface

Use the new spdk_ioat_flush interface to batch
doorbell writes - this significantly improves
descriptor throughput.  For now, just set the
flush threshold (the number of descriptors built
before the doorbell is written) to half of the
queue depth.  We can always make the threshold
configurable later, but for now this simple
change is sufficient.

Increases 512B descriptor throughput at QD=256 from
3.7M/s to 14.0M/s on my Skylake Xeon server.
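
For reference, the batching pattern looks roughly like the sketch
below.  QUEUE_DEPTH, FLUSH_THRESHOLD, my_copy_done and submit_one
are illustrative names only (not part of the SPDK API), and channel
probing plus buffer allocation are assumed to happen elsewhere:

#include <stdint.h>
#include "spdk/ioat.h"

#define QUEUE_DEPTH     256
#define FLUSH_THRESHOLD (QUEUE_DEPTH / 2)

static uint64_t g_waiting_for_flush;

static void
my_copy_done(void *cb_arg)
{
	/* per-transfer completion bookkeeping would go here */
}

static void
submit_one(struct spdk_ioat_chan *chan, void *dst, void *src, uint64_t len)
{
	/* Build the descriptor but do not ring the doorbell yet. */
	if (spdk_ioat_build_copy(chan, NULL, my_copy_done, dst, src, len) != 0) {
		/* no descriptor available; a real caller would retry later */
		return;
	}

	/* Ring the doorbell once per FLUSH_THRESHOLD descriptors
	 * instead of once per descriptor.
	 */
	if (++g_waiting_for_flush >= FLUSH_THRESHOLD) {
		spdk_ioat_flush(chan);
		g_waiting_for_flush = 0;
	}
}

Note that a drain path still needs one final spdk_ioat_flush so that
descriptors below the threshold are submitted before waiting for
completions, which is what the drain_io() change below does.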

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I4504579e23cee5b6a1044849c49d33d58bdb51a9

Reviewed-on: https://review.gerrithub.io/c/445355
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Jim Harris 2019-02-19 12:55:57 -07:00
parent 608a2d5875
commit 6728c1b8cb

@@ -63,6 +63,8 @@ struct ioat_chan_entry {
 	uint64_t xfer_completed;
 	uint64_t xfer_failed;
 	uint64_t current_queue_depth;
+	uint64_t waiting_for_flush;
+	uint64_t flush_threshold;
 	bool is_draining;
 	struct spdk_mempool *data_pool;
 	struct spdk_mempool *task_pool;
@@ -302,6 +304,7 @@ parse_args(int argc, char **argv)
 static void
 drain_io(struct ioat_chan_entry *ioat_chan_entry)
 {
+	spdk_ioat_flush(ioat_chan_entry->chan);
 	while (ioat_chan_entry->current_queue_depth > 0) {
 		spdk_ioat_process_events(ioat_chan_entry->chan);
 	}
@@ -315,8 +318,13 @@ submit_single_xfer(struct ioat_chan_entry *ioat_chan_entry, struct ioat_task *io
 	ioat_task->src = src;
 	ioat_task->dst = dst;
-	spdk_ioat_submit_copy(ioat_chan_entry->chan, ioat_task, ioat_done, dst, src,
-			      g_user_config.xfer_size_bytes);
+	spdk_ioat_build_copy(ioat_chan_entry->chan, ioat_task, ioat_done, dst, src,
+			     g_user_config.xfer_size_bytes);
+	ioat_chan_entry->waiting_for_flush++;
+	if (ioat_chan_entry->waiting_for_flush >= ioat_chan_entry->flush_threshold) {
+		spdk_ioat_flush(ioat_chan_entry->chan);
+		ioat_chan_entry->waiting_for_flush = 0;
+	}
 	ioat_chan_entry->current_queue_depth++;
 }
@@ -355,6 +363,8 @@ work_fn(void *arg)
 	t = worker->ctx;
 	while (t != NULL) {
 		/* begin to submit transfers */
+		t->waiting_for_flush = 0;
+		t->flush_threshold = g_user_config.queue_depth / 2;
 		if (submit_xfers(t, g_user_config.queue_depth) < 0) {
 			return -1;
 		}