bdevperf: reuse tasks more efficiently

Instead of inserting a task into the TAILQ and then immediately
taking it back off, pass the just-completed task directly to
bdevperf_submit_single.

This reduces bdevperf's overhead relative to nvme/perf.  nvme/perf
does not use a TAILQ at all; it does something similar by passing
the just-completed task to its submit function so the task can be
reused immediately.
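
For illustration, a minimal standalone sketch of the pattern (not the
actual bdevperf code; the task struct, submit_single(), and
on_complete() names here are hypothetical, and sys/queue.h stands in
for SPDK's queue macros):

    /* Sketch only: reuse the completed task instead of a TAILQ round-trip. */
    #include <assert.h>
    #include <stdio.h>
    #include <sys/queue.h>

    struct task {
    	int id;
    	TAILQ_ENTRY(task) link;
    };

    static TAILQ_HEAD(, task) g_task_list = TAILQ_HEAD_INITIALIZER(g_task_list);

    /* Submit one I/O.  A caller that just finished a task passes it in for
     * direct reuse; only the initial ramp-up (task == NULL) touches the list. */
    static void
    submit_single(struct task *task)
    {
    	if (task == NULL) {
    		assert(!TAILQ_EMPTY(&g_task_list));
    		task = TAILQ_FIRST(&g_task_list);
    		TAILQ_REMOVE(&g_task_list, task, link);
    	}
    	printf("submit I/O using task %d\n", task->id);
    }

    /* Completion path: hand the finished task straight back to the submit
     * path instead of inserting it into the list and pulling it back off. */
    static void
    on_complete(struct task *task, int draining)
    {
    	if (!draining) {
    		submit_single(task);
    	} else {
    		TAILQ_INSERT_TAIL(&g_task_list, task, link);
    	}
    }

    int
    main(void)
    {
    	struct task tasks[2] = { { .id = 0 }, { .id = 1 } };

    	TAILQ_INSERT_TAIL(&g_task_list, &tasks[0], link);
    	TAILQ_INSERT_TAIL(&g_task_list, &tasks[1], link);

    	submit_single(NULL);          /* ramp-up: take a task from the list */
    	submit_single(NULL);
    	on_complete(&tasks[0], 0);    /* steady state: reuse completed task */
    	on_complete(&tasks[1], 1);    /* draining: return task to the list  */
    	return 0;
    }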

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I574b459a32ebe2e91ac1351de360de86cbc4a86d

Reviewed-on: https://review.gerrithub.io/393833
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Jim Harris 2018-01-05 15:10:18 -07:00
parent 29e017f406
commit 6361254c3d


@@ -73,7 +73,7 @@ static unsigned g_master_core;
 
 static struct spdk_poller *g_perf_timer = NULL;
 
-static void bdevperf_submit_single(struct io_target *target);
+static void bdevperf_submit_single(struct io_target *target, struct bdevperf_task *task);
 
 #include "../common.c"
 
@@ -293,8 +293,6 @@ bdevperf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 		target->io_completed++;
 	}
 
-	TAILQ_INSERT_TAIL(&target->task_list, task, link);
-
 	spdk_bdev_free_io(bdev_io);
 
 	/*
@@ -304,12 +302,15 @@ bdevperf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 	 * the one just completed.
 	 */
 	if (!target->is_draining) {
-		bdevperf_submit_single(target);
-	} else if (target->current_queue_depth == 0) {
-		complete = spdk_event_allocate(g_master_core, end_run, target, NULL);
-		spdk_event_call(complete);
-	}
-}
+		bdevperf_submit_single(target, task);
+	} else {
+		TAILQ_INSERT_TAIL(&target->task_list, task, link);
+		if (target->current_queue_depth == 0) {
+			complete = spdk_event_allocate(g_master_core, end_run, target, NULL);
+			spdk_event_call(complete);
+		}
+	}
+}
 
 static void
 bdevperf_unmap_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
@@ -374,11 +375,10 @@ bdevperf_verify_write_complete(struct spdk_bdev_io *bdev_io, bool success,
 static __thread unsigned int seed = 0;
 
 static void
-bdevperf_submit_single(struct io_target *target)
+bdevperf_submit_single(struct io_target *target, struct bdevperf_task *task)
 {
 	struct spdk_bdev_desc	*desc;
 	struct spdk_io_channel	*ch;
-	struct bdevperf_task	*task = NULL;
 	uint64_t		offset_in_ios;
 	void			*rbuf;
 	int			rc;
@@ -386,13 +386,15 @@ bdevperf_submit_single(struct io_target *target)
 	desc = target->bdev_desc;
 	ch = target->ch;
 
-	task = TAILQ_FIRST(&target->task_list);
 	if (!task) {
-		printf("Task allocation failed\n");
-		abort();
-	}
-
-	TAILQ_REMOVE(&target->task_list, task, link);
+		if (!TAILQ_EMPTY(&target->task_list)) {
+			task = TAILQ_FIRST(&target->task_list);
+			TAILQ_REMOVE(&target->task_list, task, link);
+		} else {
+			printf("Task allocation failed\n");
+			abort();
+		}
+	}
 
 	if (g_is_random) {
 		offset_in_ios = rand_r(&seed) % target->size_in_ios;
@@ -447,7 +449,7 @@ static void
 bdevperf_submit_io(struct io_target *target, int queue_depth)
 {
 	while (queue_depth-- > 0) {
-		bdevperf_submit_single(target);
+		bdevperf_submit_single(target, NULL);
 	}
 }
 