/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/thread.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/accel_engine.h"
#include "spdk/crc32.h"
#include "spdk/util.h"

#define DATA_PATTERN 0x5a
#define ALIGN_4K 0x1000

static uint64_t g_tsc_rate;
static uint64_t g_tsc_end;
static int g_rc;
static int g_xfer_size_bytes = 4096;
static int g_queue_depth = 32;
static int g_ops_per_batch = 0;
static int g_threads_per_core = 1;
static int g_time_in_sec = 5;
static uint32_t g_crc32c_seed = 0;
static uint32_t g_crc32c_chained_count = 1;
static int g_fail_percent_goal = 0;
static uint8_t g_fill_pattern = 255;
static bool g_verify = false;
static const char *g_workload_type = NULL;
static enum accel_capability g_workload_selection;
static struct worker_thread *g_workers = NULL;
static int g_num_workers = 0;
static pthread_mutex_t g_workers_lock = PTHREAD_MUTEX_INITIALIZER;
uint64_t g_capabilities;

struct worker_thread;
static void accel_done(void *ref, int status);

struct display_info {
	int core;
	int thread;
};

struct ap_task {
	void *src;
	struct iovec *iovs;
	uint32_t iov_cnt;
	void *dst;
	void *dst2;
	struct worker_thread *worker;
	int status;
	int expected_status; /* used for the compare operation */
	TAILQ_ENTRY(ap_task) link;
};

struct accel_batch {
	int status;
	int cmd_count;
	struct spdk_accel_batch *batch;
	struct worker_thread *worker;
	TAILQ_ENTRY(accel_batch) link;
};

struct worker_thread {
	struct spdk_io_channel *ch;
	uint64_t xfer_completed;
	uint64_t xfer_failed;
	uint64_t injected_miscompares;
	uint64_t current_queue_depth;
	TAILQ_HEAD(, ap_task) tasks_pool;
	struct worker_thread *next;
	unsigned core;
	struct spdk_thread *thread;
	bool is_draining;
	struct spdk_poller *is_draining_poller;
	struct spdk_poller *stop_poller;
	void *task_base;
	struct accel_batch *batch_base;
	struct display_info display;
	TAILQ_HEAD(, accel_batch) in_prep_batches;
	TAILQ_HEAD(, accel_batch) in_use_batches;
	TAILQ_HEAD(, accel_batch) to_submit_batches;
};

static void
dump_user_config(struct spdk_app_opts *opts)
{
	printf("SPDK Configuration:\n");
	printf("Core mask: %s\n\n", opts->reactor_mask);
	printf("Accel Perf Configuration:\n");
	printf("Workload Type: %s\n", g_workload_type);
	if (g_workload_selection == ACCEL_CRC32C) {
		printf("CRC-32C seed: %u\n", g_crc32c_seed);
		printf("vector size: %u\n", g_crc32c_chained_count);
	} else if (g_workload_selection == ACCEL_FILL) {
		printf("Fill pattern: 0x%x\n", g_fill_pattern);
	} else if ((g_workload_selection == ACCEL_COMPARE) && g_fail_percent_goal > 0) {
		printf("Failure inject: %u percent\n", g_fail_percent_goal);
	}
	printf("Transfer size: %u bytes\n", g_xfer_size_bytes);
	printf("Queue depth: %u\n", g_queue_depth);
	printf("# threads/core: %u\n", g_threads_per_core);
	printf("Run time: %u seconds\n", g_time_in_sec);
	if (g_ops_per_batch > 0) {
		printf("Batching: %u operations\n", g_ops_per_batch);
	} else {
		printf("Batching: Disabled\n");
	}
	printf("Verify: %s\n\n", g_verify ? "Yes" : "No");
}

static void
usage(void)
{
	printf("accel_perf options:\n");
	printf("\t[-h help message]\n");
	printf("\t[-q queue depth per core]\n");
	printf("\t[-C for crc32c workload, use this value to configure the io vector size to test (default 1)]\n");
	printf("\t[-T number of threads per core]\n");
	printf("\t[-o transfer size in bytes]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-w workload type must be one of these: copy, fill, crc32c, compare, dualcast]\n");
	printf("\t[-s for crc32c workload, use this seed value (default 0)]\n");
	printf("\t[-P for compare workload, percentage of operations that should miscompare (percent, default 0)]\n");
	printf("\t[-f for fill workload, use this BYTE value (default 255)]\n");
	printf("\t[-y verify result if this switch is on]\n");
	printf("\t[-b batch this number of operations at a time (default 0 = disabled)]\n");
}
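
/*
 * Example invocations (illustrative only; the binary path depends on the
 * build and the values here are arbitrary). Each flag maps to one of the
 * cases in parse_args() below:
 *
 *   ./accel_perf -w copy -o 4096 -q 32 -t 5 -y
 *   ./accel_perf -w crc32c -C 4 -s 0 -q 64 -t 10
 *   ./accel_perf -w compare -P 10 -t 5
 *   ./accel_perf -w fill -f 170 -b 8 -q 32
 */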

static int
parse_args(int ch, char *arg)
{
	switch (ch) {
	case 'b':
		g_ops_per_batch = spdk_strtol(optarg, 10);
		break;
	case 'C':
		g_crc32c_chained_count = spdk_strtol(optarg, 10);
		break;
	case 'f':
		g_fill_pattern = (uint8_t)spdk_strtol(optarg, 10);
		break;
	case 'T':
		g_threads_per_core = spdk_strtol(optarg, 10);
		break;
	case 'o':
		g_xfer_size_bytes = spdk_strtol(optarg, 10);
		break;
	case 'P':
		g_fail_percent_goal = spdk_strtol(optarg, 10);
		break;
	case 'q':
		g_queue_depth = spdk_strtol(optarg, 10);
		break;
	case 's':
		g_crc32c_seed = spdk_strtol(optarg, 10);
		break;
	case 't':
		g_time_in_sec = spdk_strtol(optarg, 10);
		break;
	case 'y':
		g_verify = true;
		break;
	case 'w':
		g_workload_type = optarg;
		if (!strcmp(g_workload_type, "copy")) {
			g_workload_selection = ACCEL_COPY;
		} else if (!strcmp(g_workload_type, "fill")) {
			g_workload_selection = ACCEL_FILL;
		} else if (!strcmp(g_workload_type, "crc32c")) {
			g_workload_selection = ACCEL_CRC32C;
		} else if (!strcmp(g_workload_type, "compare")) {
			g_workload_selection = ACCEL_COMPARE;
		} else if (!strcmp(g_workload_type, "dualcast")) {
			g_workload_selection = ACCEL_DUALCAST;
		}
		break;
	default:
		usage();
		return 1;
	}

	return 0;
}

static int dump_result(void);

static void
unregister_worker(void *arg1)
{
	struct worker_thread *worker = arg1;

	free(worker->task_base);
	free(worker->batch_base);
	spdk_put_io_channel(worker->ch);
	pthread_mutex_lock(&g_workers_lock);
	assert(g_num_workers >= 1);
	if (--g_num_workers == 0) {
		pthread_mutex_unlock(&g_workers_lock);
		g_rc = dump_result();
		spdk_app_stop(0);
	} else {
		pthread_mutex_unlock(&g_workers_lock);
	}
}

static int
_get_task_data_bufs(struct ap_task *task)
{
	uint32_t align = 0;
	uint32_t i = 0;

	/* For dualcast, the DSA HW requires 4K alignment on destination addresses but
	 * we do this for all engines to keep it simple.
	 */
	if (g_workload_selection == ACCEL_DUALCAST) {
		align = ALIGN_4K;
	}

	if (g_workload_selection == ACCEL_CRC32C) {
		assert(g_crc32c_chained_count > 0);
		task->iov_cnt = g_crc32c_chained_count;
		task->iovs = calloc(task->iov_cnt, sizeof(struct iovec));
		if (!task->iovs) {
			fprintf(stderr, "cannot allocate task->iovs for task=%p\n", task);
			return -ENOMEM;
		}

		for (i = 0; i < task->iov_cnt; i++) {
			task->iovs[i].iov_base = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
			if (task->iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			memset(task->iovs[i].iov_base, DATA_PATTERN, g_xfer_size_bytes);
			task->iovs[i].iov_len = g_xfer_size_bytes;
		}

	} else {
		task->src = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
		if (task->src == NULL) {
			fprintf(stderr, "Unable to alloc src buffer\n");
			return -ENOMEM;
		}

		/* For fill, set the entire src buffer so we can check it if verify is enabled. */
		if (g_workload_selection == ACCEL_FILL) {
			memset(task->src, g_fill_pattern, g_xfer_size_bytes);
		} else {
			memset(task->src, DATA_PATTERN, g_xfer_size_bytes);
		}
	}

	task->dst = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
	if (task->dst == NULL) {
		fprintf(stderr, "Unable to alloc dst buffer\n");
		return -ENOMEM;
	}

	/* For compare we want the buffers to match, otherwise not. */
	if (g_workload_selection == ACCEL_COMPARE) {
		memset(task->dst, DATA_PATTERN, g_xfer_size_bytes);
	} else {
		memset(task->dst, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	if (g_workload_selection == ACCEL_DUALCAST) {
		task->dst2 = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
		if (task->dst2 == NULL) {
			fprintf(stderr, "Unable to alloc dst2 buffer\n");
			return -ENOMEM;
		}
		memset(task->dst2, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	return 0;
}
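
/*
 * Summary of the buffers prepared above, per workload. This restates the
 * code in _get_task_data_bufs(); it does not add any requirements:
 *
 *   copy:     src = DATA_PATTERN, dst = ~DATA_PATTERN
 *   fill:     src filled with g_fill_pattern (its first byte is the fill
 *             value used at submit time), dst = ~DATA_PATTERN
 *   crc32c:   iovs[] of DATA_PATTERN buffers; dst receives the 32-bit CRC
 *   compare:  src = dst = DATA_PATTERN, so the buffers match by default
 *   dualcast: src = DATA_PATTERN; dst and dst2 = ~DATA_PATTERN, both
 *             4K-aligned for the DSA HW
 */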

inline static struct ap_task *
_get_task(struct worker_thread *worker)
{
	struct ap_task *task;

	if (!TAILQ_EMPTY(&worker->tasks_pool)) {
		task = TAILQ_FIRST(&worker->tasks_pool);
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
	} else {
		fprintf(stderr, "Unable to get ap_task\n");
		return NULL;
	}

	task->worker = worker;
	task->worker->current_queue_depth++;
	return task;
}

/* Submit one operation using the same ap task that just completed. */
static void
_submit_single(struct worker_thread *worker, struct ap_task *task)
{
	int random_num;
	int rc = 0;

	assert(worker);

	switch (g_workload_selection) {
	case ACCEL_COPY:
		rc = spdk_accel_submit_copy(worker->ch, task->dst, task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_FILL:
		/* For fill use the first byte of the task->src buffer */
		rc = spdk_accel_submit_fill(worker->ch, task->dst, *(uint8_t *)task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_CRC32C:
		rc = spdk_accel_submit_crc32cv(worker->ch, (uint32_t *)task->dst,
					       task->iovs, task->iov_cnt, g_crc32c_seed,
					       accel_done, task);
		break;
	case ACCEL_COMPARE:
		random_num = rand() % 100;
		if (random_num < g_fail_percent_goal) {
			task->expected_status = -EILSEQ;
			*(uint8_t *)task->dst = ~DATA_PATTERN;
		} else {
			task->expected_status = 0;
			*(uint8_t *)task->dst = DATA_PATTERN;
		}
		rc = spdk_accel_submit_compare(worker->ch, task->dst, task->src,
					       g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_DUALCAST:
		rc = spdk_accel_submit_dualcast(worker->ch, task->dst, task->dst2,
						task->src, g_xfer_size_bytes, accel_done, task);
		break;
	default:
		assert(false);
		break;
	}

	if (rc) {
		accel_done(task, rc);
	}
}
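
/*
 * Minimal sketch of the submit/callback pattern used above, outside the
 * context of this tool. It assumes an initialized SPDK application
 * environment; my_done_cb, cb_arg, src, dst and len are hypothetical names:
 *
 *   struct spdk_io_channel *ch = spdk_accel_engine_get_io_channel();
 *   rc = spdk_accel_submit_copy(ch, dst, src, len, my_done_cb, cb_arg);
 *   // my_done_cb(cb_arg, status) fires on completion; resubmit or free there.
 *
 * Errors from the submit call itself are reported synchronously via rc,
 * which is why _submit_single() invokes accel_done() directly when rc != 0.
 */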

static int
_batch_prep_cmd(struct worker_thread *worker, struct ap_task *task,
		struct accel_batch *worker_batch)
{
	struct spdk_accel_batch *batch = worker_batch->batch;
	int rc = 0;

	worker_batch->cmd_count++;
	assert(worker_batch->cmd_count <= g_ops_per_batch);

	switch (g_workload_selection) {
	case ACCEL_COPY:
		rc = spdk_accel_batch_prep_copy(worker->ch, batch, task->dst,
						task->src, g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_DUALCAST:
		rc = spdk_accel_batch_prep_dualcast(worker->ch, batch, task->dst, task->dst2,
						    task->src, g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_COMPARE:
		rc = spdk_accel_batch_prep_compare(worker->ch, batch, task->dst, task->src,
						   g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_FILL:
		rc = spdk_accel_batch_prep_fill(worker->ch, batch, task->dst,
						*(uint8_t *)task->src,
						g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_CRC32C:
		rc = spdk_accel_batch_prep_crc32cv(worker->ch, batch, (uint32_t *)task->dst,
						   task->iovs, task->iov_cnt, g_crc32c_seed, accel_done, task);
		break;
	default:
		assert(false);
		break;
	}

	return rc;
}
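
/*
 * Roadmap of the batching API flow used in this file, in call order. All
 * of these calls appear elsewhere in this file; this comment adds no new
 * functionality:
 *
 *   batch = spdk_accel_batch_create(ch);           // allocate a batch
 *   spdk_accel_batch_prep_copy(ch, batch, ...);    // queue up to
 *                                                  // spdk_accel_batch_get_max(ch) ops
 *   spdk_accel_batch_submit(ch, batch, cb, arg);   // submit; cb gets one status
 *   spdk_accel_batch_cancel(ch, batch);            // only on error paths
 */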

static void
_free_task_buffers(struct ap_task *task)
{
	uint32_t i;

	if (g_workload_selection == ACCEL_CRC32C) {
		if (task->iovs) {
			for (i = 0; i < task->iov_cnt; i++) {
				if (task->iovs[i].iov_base) {
					spdk_dma_free(task->iovs[i].iov_base);
				}
			}
			free(task->iovs);
		}
	} else {
		spdk_dma_free(task->src);
	}

	spdk_dma_free(task->dst);
	if (g_workload_selection == ACCEL_DUALCAST) {
		spdk_dma_free(task->dst2);
	}
}

static void _batch_done(void *cb_arg);
static void
_build_batch(struct worker_thread *worker, struct ap_task *task)
{
	struct accel_batch *worker_batch = NULL;
	int rc;

	assert(!TAILQ_EMPTY(&worker->in_prep_batches));

	worker_batch = TAILQ_FIRST(&worker->in_prep_batches);

	/* If an accel batch hasn't been created yet do so now. */
	if (worker_batch->batch == NULL) {
		worker_batch->batch = spdk_accel_batch_create(worker->ch);
		if (worker_batch->batch == NULL) {
			fprintf(stderr, "error unable to create new batch\n");
			return;
		}
	}

	/* Prep the command re-using the last completed command's task */
	rc = _batch_prep_cmd(worker, task, worker_batch);
	if (rc) {
		fprintf(stderr, "error prepping command for batch\n");
		goto error;
	}

	/* If this batch is full move it to the to_submit list so it gets
	 * submitted as batches complete.
	 */
	if (worker_batch->cmd_count == g_ops_per_batch) {
		TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
		TAILQ_INSERT_TAIL(&worker->to_submit_batches, worker_batch, link);
	}

	return;
error:
	spdk_accel_batch_cancel(worker->ch, worker_batch->batch);
}

static void batch_done(void *cb_arg, int status);
static void
_drain_batch(struct worker_thread *worker)
{
	struct accel_batch *worker_batch, *tmp;
	int rc;

	/* Submit any batches that were being built up. */
	TAILQ_FOREACH_SAFE(worker_batch, &worker->in_prep_batches, link, tmp) {
		if (worker_batch->cmd_count == 0) {
			continue;
		}
		worker->current_queue_depth += worker_batch->cmd_count + 1;

		TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
		TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);
		rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
		if (rc == 0) {
			worker_batch->cmd_count = 0;
		} else {
			fprintf(stderr, "error sending final batch\n");
			worker->current_queue_depth -= worker_batch->cmd_count + 1;
			break;
		}
	}
}

static void
_batch_done(void *cb_arg)
{
	struct accel_batch *worker_batch = (struct accel_batch *)cb_arg;
	struct worker_thread *worker = worker_batch->worker;
	int rc;

	assert(!TAILQ_EMPTY(&worker->in_use_batches));

	if (worker_batch->status) {
		SPDK_ERRLOG("error %d\n", worker_batch->status);
	}

	worker->current_queue_depth--;
	TAILQ_REMOVE(&worker->in_use_batches, worker_batch, link);
	TAILQ_INSERT_TAIL(&worker->in_prep_batches, worker_batch, link);
	worker_batch->batch = NULL;
	worker_batch->cmd_count = 0;

	if (!worker->is_draining) {
		worker_batch = TAILQ_FIRST(&worker->to_submit_batches);
		if (worker_batch != NULL) {

			assert(worker_batch->cmd_count == g_ops_per_batch);

			/* Add one for the batch command itself. */
			worker->current_queue_depth += g_ops_per_batch + 1;
			TAILQ_REMOVE(&worker->to_submit_batches, worker_batch, link);
			TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);

			rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
			if (rc) {
				fprintf(stderr, "error submitting batch\n");
				worker->current_queue_depth -= g_ops_per_batch + 1;
				return;
			}
		}
	} else {
		_drain_batch(worker);
	}
}

static void
batch_done(void *cb_arg, int status)
{
	struct accel_batch *worker_batch = (struct accel_batch *)cb_arg;

	assert(worker_batch->worker);

	worker_batch->status = status;
	spdk_thread_send_msg(worker_batch->worker->thread, _batch_done, worker_batch);
}

static uint32_t
_update_crc32c_iov(struct iovec *iov, int iovcnt, uint32_t crc32c)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		assert(iov[i].iov_base != NULL);
		assert(iov[i].iov_len != 0);
		crc32c = spdk_crc32c_update(iov[i].iov_base, iov[i].iov_len, crc32c);
	}
	return crc32c;
}
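
/*
 * Note on verification (an explanatory comment, not a behavior change):
 * _accel_done() below seeds this helper with ~g_crc32c_seed because
 * spdk_crc32c_update() operates on the bit-inverted running CRC, so the
 * inverted seed makes the software result directly comparable to the
 * 32-bit value the engine wrote to task->dst.
 */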

static void
_accel_done(void *arg1)
{
	struct ap_task *task = arg1;
	struct worker_thread *worker = task->worker;
	uint32_t sw_crc32c;

	assert(worker);
	assert(worker->current_queue_depth > 0);

	if (g_verify && task->status == 0) {
		switch (g_workload_selection) {
		case ACCEL_CRC32C:
			sw_crc32c = _update_crc32c_iov(task->iovs, task->iov_cnt, ~g_crc32c_seed);
			if (*(uint32_t *)task->dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_COPY:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_DUALCAST:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, first destination\n");
				worker->xfer_failed++;
			}
			if (memcmp(task->src, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, second destination\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_FILL:
			if (memcmp(task->dst, task->src, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_COMPARE:
			break;
		default:
			assert(false);
			break;
		}
	}

	if (task->expected_status == -EILSEQ) {
		assert(task->status != 0);
		worker->injected_miscompares++;
	} else if (task->status) {
		/* Expected to pass but the accel engine reported an error (ex: COMPARE operation). */
		worker->xfer_failed++;
	}

	worker->xfer_completed++;
	worker->current_queue_depth--;

	if (!worker->is_draining) {
		if (g_ops_per_batch == 0) {
			_submit_single(worker, task);
			worker->current_queue_depth++;
		} else {
			_build_batch(worker, task);
		}
	} else if (g_ops_per_batch > 0) {
		_drain_batch(worker);
	} else {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
	}
}

static int
dump_result(void)
{
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;
	uint64_t total_miscompared = 0;
	uint64_t total_xfer_per_sec, total_bw_in_MiBps;
	struct worker_thread *worker = g_workers;

	printf("\nCore,Thread   Transfers     Bandwidth     Failed     Miscompares\n");
	printf("------------------------------------------------------------------------\n");
	while (worker != NULL) {

		uint64_t xfer_per_sec = worker->xfer_completed / g_time_in_sec;
		uint64_t bw_in_MiBps = (worker->xfer_completed * g_xfer_size_bytes) /
				       (g_time_in_sec * 1024 * 1024);

		total_completed += worker->xfer_completed;
		total_failed += worker->xfer_failed;
		total_miscompared += worker->injected_miscompares;

		if (xfer_per_sec) {
			printf("%u,%u%17" PRIu64 "/s%9" PRIu64 " MiB/s%7" PRIu64 " %11" PRIu64 "\n",
			       worker->display.core, worker->display.thread, xfer_per_sec,
			       bw_in_MiBps, worker->xfer_failed, worker->injected_miscompares);
		}

		worker = worker->next;
	}

	total_xfer_per_sec = total_completed / g_time_in_sec;
	total_bw_in_MiBps = (total_completed * g_xfer_size_bytes) /
			    (g_time_in_sec * 1024 * 1024);

	printf("=========================================================================\n");
	printf("Total:%15" PRIu64 "/s%9" PRIu64 " MiB/s%6" PRIu64 " %11" PRIu64"\n\n",
	       total_xfer_per_sec, total_bw_in_MiBps, total_failed, total_miscompared);

	return total_failed ? 1 : 0;
}
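
/*
 * Worked example of the arithmetic above (no new behavior): a worker that
 * completes 1,000,000 transfers of the default 4096 bytes over the default
 * 5 seconds reports 200000/s and
 * (1000000 * 4096) / (5 * 1024 * 1024) = 781 MiB/s.
 */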

static inline void
_free_task_buffers_in_pool(struct worker_thread *worker)
{
	struct ap_task *task;

	assert(worker);
	while ((task = TAILQ_FIRST(&worker->tasks_pool))) {
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
		_free_task_buffers(task);
	}
}

static int
_check_draining(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	if (worker->current_queue_depth == 0) {
		_free_task_buffers_in_pool(worker);
		spdk_poller_unregister(&worker->is_draining_poller);
		unregister_worker(worker);
	}

	return -1;
}

static int
_worker_stop(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	spdk_poller_unregister(&worker->stop_poller);

	/* now let the worker drain and check its outstanding IO with a poller */
	worker->is_draining = true;
	worker->is_draining_poller = SPDK_POLLER_REGISTER(_check_draining, worker, 0);

	return 0;
}

static void
_init_thread(void *arg1)
{
	struct worker_thread *worker;
	struct ap_task *task;
	int i, rc, num_batches;
	int max_per_batch;
	int remaining = g_queue_depth;
	int num_tasks = g_queue_depth;
	struct accel_batch *tmp;
	struct accel_batch *worker_batch = NULL;
	struct display_info *display = arg1;

	worker = calloc(1, sizeof(*worker));
	if (worker == NULL) {
		fprintf(stderr, "Unable to allocate worker\n");
		free(display);
		return;
	}

	worker->display.core = display->core;
	worker->display.thread = display->thread;
	free(display);
	worker->core = spdk_env_get_current_core();
	worker->thread = spdk_get_thread();
	pthread_mutex_lock(&g_workers_lock);
	g_num_workers++;
	worker->next = g_workers;
	g_workers = worker;
	pthread_mutex_unlock(&g_workers_lock);
	worker->ch = spdk_accel_engine_get_io_channel();

	TAILQ_INIT(&worker->tasks_pool);

	if (g_ops_per_batch > 0) {

		max_per_batch = spdk_accel_batch_get_max(worker->ch);
		assert(max_per_batch > 0);

		if (g_ops_per_batch > max_per_batch) {
			fprintf(stderr, "Reducing requested batch amount to max supported of %d\n", max_per_batch);
			g_ops_per_batch = max_per_batch;
		}

		if (g_ops_per_batch > g_queue_depth) {
			fprintf(stderr, "Batch amount > queue depth, resetting to %d\n", g_queue_depth);
			g_ops_per_batch = g_queue_depth;
		}

		TAILQ_INIT(&worker->in_prep_batches);
		TAILQ_INIT(&worker->to_submit_batches);
		TAILQ_INIT(&worker->in_use_batches);

		/* A worker_batch will live on one of 3 lists:
		 * IN_PREP: as individual IOs complete new ones are built up on a
		 *          worker_batch on this list until it reaches g_ops_per_batch.
		 * TO_SUBMIT: as batches are built up on IO completion they are moved
		 *            to this list once they are full. This list is used in
		 *            batch completion to start new batches.
		 * IN_USE: the worker_batch is outstanding and will be moved to the in
		 *         prep list when the batch is completed.
		 *
		 * So we need enough to cover Q depth loading and then one to replace
		 * each one of those, and when everything is outstanding there needs
		 * to be one extra batch to build up while the last batch is completing
		 * its IO but before its batch command has completed.
		 */
		num_batches = (g_queue_depth / g_ops_per_batch * 2) + 1;
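		/* For example, with -q 32 (the default queue depth) and -b 8
		 * this works out to (32 / 8) * 2 + 1 = 9 batch descriptors.
		 */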
		worker->batch_base = calloc(num_batches, sizeof(struct accel_batch));
		if (worker->batch_base == NULL) {
			fprintf(stderr, "Could not allocate batch base.\n");
			goto error;
		}
		worker_batch = worker->batch_base;
		for (i = 0; i < num_batches; i++) {
			worker_batch->worker = worker;
			TAILQ_INSERT_TAIL(&worker->in_prep_batches, worker_batch, link);
			worker_batch++;
		}
	}

	worker->task_base = calloc(num_tasks, sizeof(struct ap_task));
	if (worker->task_base == NULL) {
		fprintf(stderr, "Could not allocate task base.\n");
		goto error;
	}

	task = worker->task_base;
	for (i = 0; i < num_tasks; i++) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		if (_get_task_data_bufs(task)) {
			fprintf(stderr, "Unable to get data bufs\n");
			goto error;
		}
		task++;
	}

	/* Register a poller that will stop the worker at time elapsed */
	worker->stop_poller = SPDK_POLLER_REGISTER(_worker_stop, worker,
			      g_time_in_sec * 1000000ULL);

	/* If batching is enabled load up to the full Q depth before
	 * processing any completions, then ping pong between two batches,
	 * one processing and one being built up for when the other completes.
	 */
	if (g_ops_per_batch > 0) {
		do {
			worker_batch = TAILQ_FIRST(&worker->in_prep_batches);
			if (worker_batch == NULL) {
				goto error;
			}

			worker_batch->batch = spdk_accel_batch_create(worker->ch);
			if (worker_batch->batch == NULL) {
				raise(SIGINT);
				break;
			}

			for (i = 0; i < g_ops_per_batch; i++) {
				task = _get_task(worker);
				if (task == NULL) {
					goto error;
				}

				rc = _batch_prep_cmd(worker, task, worker_batch);
				if (rc) {
					fprintf(stderr, "error prepping command\n");
					goto error;
				}
			}

			/* for the batch operation itself. */
			task->worker->current_queue_depth++;
			TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
			TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);

			rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
			if (rc) {
				fprintf(stderr, "error submitting batch\n");
				goto error;
			}
			assert(remaining >= g_ops_per_batch);
			remaining -= g_ops_per_batch;
		} while (remaining > 0);
	}

	/* Submit as singles when no batching is enabled or we ran out of batches. */
	for (i = 0; i < remaining; i++) {
		task = _get_task(worker);
		if (task == NULL) {
			goto error;
		}

		_submit_single(worker, task);
	}
	return;
error:
	if (worker_batch && worker_batch->batch) {
		TAILQ_FOREACH_SAFE(worker_batch, &worker->in_use_batches, link, tmp) {
			spdk_accel_batch_cancel(worker->ch, worker_batch->batch);
			TAILQ_REMOVE(&worker->in_use_batches, worker_batch, link);
		}
	}

	_free_task_buffers_in_pool(worker);
	free(worker->batch_base);
	free(worker->task_base);
	free(worker);
	spdk_app_stop(-1);
}

static void
accel_done(void *cb_arg, int status)
{
	struct ap_task *task = (struct ap_task *)cb_arg;
	struct worker_thread *worker = task->worker;

	assert(worker);

	task->status = status;
	spdk_thread_send_msg(worker->thread, _accel_done, task);
}

static void
accel_perf_start(void *arg1)
{
	struct spdk_io_channel *accel_ch;
	struct spdk_cpuset tmp_cpumask = {};
	char thread_name[32];
	uint32_t i;
	int j;
	struct spdk_thread *thread;
	struct display_info *display;

	accel_ch = spdk_accel_engine_get_io_channel();
	g_capabilities = spdk_accel_get_capabilities(accel_ch);
	spdk_put_io_channel(accel_ch);

	if ((g_capabilities & g_workload_selection) != g_workload_selection) {
		SPDK_WARNLOG("The selected workload is not natively supported by the current engine\n");
		SPDK_WARNLOG("The software engine will be used instead.\n\n");
	}

	g_tsc_rate = spdk_get_ticks_hz();
	g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	printf("Running for %d seconds...\n", g_time_in_sec);
	fflush(stdout);

	/* Create worker threads for each core that was specified. */
	SPDK_ENV_FOREACH_CORE(i) {
		for (j = 0; j < g_threads_per_core; j++) {
			snprintf(thread_name, sizeof(thread_name), "ap_worker_%u_%u", i, j);
			spdk_cpuset_zero(&tmp_cpumask);
			spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
			thread = spdk_thread_create(thread_name, &tmp_cpumask);
			display = calloc(1, sizeof(*display));
			if (display == NULL) {
				fprintf(stderr, "Unable to allocate memory\n");
				spdk_app_stop(-1);
				return;
			}
			display->core = i;
			display->thread = j;
			spdk_thread_send_msg(thread, _init_thread, display);
		}
	}
}
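
/*
 * A small worked example of the fan-out above: running with a core mask of
 * 0x3 (e.g. via the standard SPDK -m core mask option) and -T 2 creates
 * four SPDK threads, named ap_worker_0_0, ap_worker_0_1, ap_worker_1_0 and
 * ap_worker_1_1, each pinned to its core and each owning an independent
 * worker_thread with its own queue depth.
 */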

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	struct worker_thread *worker, *tmp;

	pthread_mutex_init(&g_workers_lock, NULL);
	spdk_app_opts_init(&opts, sizeof(opts));
	opts.reactor_mask = "0x1";
	if (spdk_app_parse_args(argc, argv, &opts, "C:o:q:t:yw:P:f:b:T:", NULL, parse_args,
				usage) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		g_rc = -1;
		goto cleanup;
	}

	if ((g_workload_selection != ACCEL_COPY) &&
	    (g_workload_selection != ACCEL_FILL) &&
	    (g_workload_selection != ACCEL_CRC32C) &&
	    (g_workload_selection != ACCEL_COMPARE) &&
	    (g_workload_selection != ACCEL_DUALCAST)) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	if (g_ops_per_batch > 0 && (g_queue_depth % g_ops_per_batch > 0)) {
		fprintf(stdout, "queue depth must be a multiple of batch size\n");
		usage();
		g_rc = -1;
		goto cleanup;
	}

	if (g_workload_selection == ACCEL_CRC32C &&
	    g_crc32c_chained_count == 0) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	dump_user_config(&opts);
	g_rc = spdk_app_start(&opts, accel_perf_start, NULL);
	if (g_rc) {
		SPDK_ERRLOG("ERROR starting application\n");
	}

	pthread_mutex_destroy(&g_workers_lock);

	worker = g_workers;
	while (worker) {
		tmp = worker->next;
		free(worker);
		worker = tmp;
	}
cleanup:
	spdk_app_fini();
	return g_rc;
}