2022-06-03 19:15:11 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
2022-11-01 20:26:26 +00:00
|
|
|
* Copyright (C) 2020 Intel Corporation.
|
2020-02-13 22:13:53 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "spdk/stdinc.h"
|
|
|
|
#include "spdk/thread.h"
|
|
|
|
#include "spdk/env.h"
|
|
|
|
#include "spdk/event.h"
|
|
|
|
#include "spdk/log.h"
|
|
|
|
#include "spdk/string.h"
|
2022-08-08 20:31:08 +00:00
|
|
|
#include "spdk/accel.h"
|
2020-04-28 22:23:19 +00:00
|
|
|
#include "spdk/crc32.h"
|
2020-07-13 22:14:57 +00:00
|
|
|
#include "spdk/util.h"
|
2022-10-05 10:42:58 +00:00
|
|
|
#include "spdk/xor.h"
|
2020-02-13 22:13:53 +00:00
|
|
|
|
2020-04-29 22:57:06 +00:00
|
|
|
#define DATA_PATTERN 0x5a
|
2020-04-30 22:09:17 +00:00
|
|
|
#define ALIGN_4K 0x1000
|
2023-01-01 16:15:34 +00:00
|
|
|
#define COMP_BUF_PAD_PERCENTAGE 1.1L
|
2020-04-29 22:57:06 +00:00
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
static uint64_t g_tsc_rate;
|
|
|
|
static uint64_t g_tsc_end;
|
2021-02-08 17:19:29 +00:00
|
|
|
static int g_rc;
|
2020-02-13 22:13:53 +00:00
|
|
|
static int g_xfer_size_bytes = 4096;
|
|
|
|
static int g_queue_depth = 32;
|
2021-06-21 17:38:43 +00:00
|
|
|
/* g_allocate_depth indicates how many tasks we allocate per worker. It will
|
|
|
|
* be at least as much as the queue depth.
|
|
|
|
*/
|
|
|
|
static int g_allocate_depth = 0;
|
2021-02-09 00:00:37 +00:00
|
|
|
static int g_threads_per_core = 1;
|
2020-02-13 22:13:53 +00:00
|
|
|
static int g_time_in_sec = 5;
|
2020-04-28 22:23:19 +00:00
|
|
|
static uint32_t g_crc32c_seed = 0;
|
2022-09-22 19:01:56 +00:00
|
|
|
static uint32_t g_chained_count = 1;
|
2020-04-29 22:57:06 +00:00
|
|
|
static int g_fail_percent_goal = 0;
|
2020-05-01 22:04:27 +00:00
|
|
|
static uint8_t g_fill_pattern = 255;
|
2022-10-05 10:42:58 +00:00
|
|
|
static uint32_t g_xor_src_count = 2;
|
2020-02-13 22:13:53 +00:00
|
|
|
static bool g_verify = false;
|
2020-04-07 16:16:13 +00:00
|
|
|
static const char *g_workload_type = NULL;
|
2022-03-15 17:43:07 +00:00
|
|
|
static enum accel_opcode g_workload_selection;
|
2020-02-13 22:13:53 +00:00
|
|
|
static struct worker_thread *g_workers = NULL;
|
|
|
|
static int g_num_workers = 0;
|
2022-06-22 21:35:57 +00:00
|
|
|
static char *g_cd_file_in_name = NULL;
|
2020-02-13 22:13:53 +00:00
|
|
|
static pthread_mutex_t g_workers_lock = PTHREAD_MUTEX_INITIALIZER;
|
2022-06-08 00:11:12 +00:00
|
|
|
static struct spdk_app_opts g_opts = {};
|
2020-12-08 16:32:49 +00:00
|
|
|
|
2022-06-22 21:35:57 +00:00
|
|
|
/* One input segment for the compress/decompress workloads: a chunk of
 * uncompressed data plus its compressed counterpart, each also described
 * by an iovec array for vectored submission. Segments are prepared up
 * front (elsewhere in this file) and strung on g_compress_segs. */
struct ap_compress_seg {
	void *uncompressed_data;
	uint32_t uncompressed_len;
	struct iovec *uncompressed_iovs;
	uint32_t uncompressed_iovcnt;

	void *compressed_data;
	uint32_t compressed_len;
	uint32_t compressed_len_padded;	/* compressed_len with extra headroom; used to size dst buffers */
	struct iovec *compressed_iovs;
	uint32_t compressed_iovcnt;

	STAILQ_ENTRY(ap_compress_seg) link;
};

/* Global list of prepared segments; tasks walk it round-robin (see accel_done()). */
static STAILQ_HEAD(, ap_compress_seg) g_compress_segs = STAILQ_HEAD_INITIALIZER(g_compress_segs);
|
|
|
|
|
2020-12-08 16:32:49 +00:00
|
|
|
struct worker_thread;
/* Operation completion callback; defined later in this file. */
static void accel_done(void *ref, int status);

/* Identifies a worker in the result table: its core and per-core thread index. */
struct display_info {
	int core;
	int thread;
};
|
|
|
|
|
2020-12-08 16:32:49 +00:00
|
|
|
/* Per-operation context. One ap_task exists per allocated slot (see
 * g_allocate_depth); which members are meaningful depends on the selected
 * workload. */
struct ap_task {
	void *src;			/* single source buffer (copy/fill/compare/dualcast) */
	struct iovec *src_iovs;		/* vectored source (crc32c, copy_crc32c, compress/decompress) */
	uint32_t src_iovcnt;
	void **sources;			/* g_xor_src_count source buffers (xor workload) */
	struct iovec *dst_iovs;		/* vectored destination (compress/decompress) */
	uint32_t dst_iovcnt;
	void *dst;			/* primary destination buffer */
	void *dst2;			/* second destination (dualcast; xor verification) */
	uint32_t crc_dst;		/* CRC value produced by crc32c operations */
	uint32_t compressed_sz;		/* output size reported by the compress operation */
	struct ap_compress_seg *cur_seg;	/* current compress/decompress input segment */
	struct worker_thread *worker;	/* owning worker */
	int expected_status; /* used for the compare operation */
	TAILQ_ENTRY(ap_task) link;	/* linkage on the worker's tasks_pool free list */
};
|
2020-02-13 22:13:53 +00:00
|
|
|
|
|
|
|
/* Per-thread test state. Workers form a singly linked list headed by
 * g_workers; each worker owns its own channel, task pool, and counters. */
struct worker_thread {
	struct spdk_io_channel *ch;		/* accel framework channel for submissions */
	struct spdk_accel_opcode_stats stats;	/* opcode stats snapshot taken in unregister_worker() */
	uint64_t xfer_failed;			/* failed operations / verification miscompares */
	uint64_t injected_miscompares;		/* deliberate compare failures (see -P) */
	uint64_t current_queue_depth;		/* operations currently outstanding */
	TAILQ_HEAD(, ap_task) tasks_pool;	/* free tasks available for submission */
	struct worker_thread *next;		/* next worker in the g_workers list */
	unsigned core;				/* core this worker is bound to */
	struct spdk_thread *thread;		/* SPDK thread backing this worker */
	bool is_draining;			/* set when the run is winding down; stops resubmission */
	struct spdk_poller *is_draining_poller;
	struct spdk_poller *stop_poller;
	void *task_base;			/* backing allocation for the task pool */
	struct display_info display;		/* core/thread ids for the result table */
	enum accel_opcode workload;		/* opcode this worker submits */
};
|
|
|
|
|
|
|
|
/* Print the effective test configuration (workload, sizes, depths, module)
 * to stdout before the run starts. Reads only the g_* option globals. */
static void
dump_user_config(void)
{
	const char *module_name = NULL;
	int rc;

	rc = spdk_accel_get_opc_module_name(g_workload_selection, &module_name);
	if (rc) {
		/* Non-fatal: report and continue; module_name stays NULL. */
		printf("error getting module name (%d)\n", rc);
	}

	printf("\nSPDK Configuration:\n");
	printf("Core mask: %s\n\n", g_opts.reactor_mask);
	printf("Accel Perf Configuration:\n");
	printf("Workload Type: %s\n", g_workload_type);
	/* Workload-specific parameters. */
	if (g_workload_selection == ACCEL_OPC_CRC32C || g_workload_selection == ACCEL_OPC_COPY_CRC32C) {
		printf("CRC-32C seed: %u\n", g_crc32c_seed);
	} else if (g_workload_selection == ACCEL_OPC_FILL) {
		printf("Fill pattern: 0x%x\n", g_fill_pattern);
	} else if ((g_workload_selection == ACCEL_OPC_COMPARE) && g_fail_percent_goal > 0) {
		printf("Failure inject: %u percent\n", g_fail_percent_goal);
	} else if (g_workload_selection == ACCEL_OPC_XOR) {
		printf("Source buffers: %u\n", g_xor_src_count);
	}
	/* For copy_crc32c the total transfer is one vector element per chained iovec. */
	if (g_workload_selection == ACCEL_OPC_COPY_CRC32C) {
		printf("Vector size: %u bytes\n", g_xfer_size_bytes);
		printf("Transfer size: %u bytes\n", g_xfer_size_bytes * g_chained_count);
	} else {
		printf("Transfer size: %u bytes\n", g_xfer_size_bytes);
	}
	printf("vector count %u\n", g_chained_count);
	printf("Module: %s\n", module_name);
	if (g_workload_selection == ACCEL_OPC_COMPRESS || g_workload_selection == ACCEL_OPC_DECOMPRESS) {
		printf("File Name: %s\n", g_cd_file_in_name);
	}
	printf("Queue depth: %u\n", g_queue_depth);
	printf("Allocate depth: %u\n", g_allocate_depth);
	printf("# threads/core: %u\n", g_threads_per_core);
	printf("Run time: %u seconds\n", g_time_in_sec);
	printf("Verify: %s\n\n", g_verify ? "Yes" : "No");
}
|
|
|
|
|
|
|
|
/* Print command-line help to stdout. Fix: several option descriptions had
 * unbalanced brackets (e.g. "[-T number of threads per core" with no
 * closing "]"); all lines now close their brackets consistently. */
static void
usage(void)
{
	printf("accel_perf options:\n");
	printf("\t[-h help message]\n");
	printf("\t[-q queue depth per core]\n");
	printf("\t[-C for supported workloads, use this value to configure the io vector size to test (default 1)]\n");
	printf("\t[-T number of threads per core]\n");
	printf("\t[-n number of channels]\n");
	printf("\t[-o transfer size in bytes (default: 4KiB. For compress/decompress, 0 means the input file size)]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-w workload type must be one of these: copy, fill, crc32c, copy_crc32c, compare, compress, decompress, dualcast, xor]\n");
	printf("\t[-l for compress/decompress workloads, name of uncompressed input file]\n");
	printf("\t[-s for crc32c workload, use this seed value (default 0)]\n");
	printf("\t[-P for compare workload, percentage of operations that should miscompare (percent, default 0)]\n");
	printf("\t[-f for fill workload, use this BYTE value (default 255)]\n");
	printf("\t[-x for xor workload, use this number of source buffers (default, minimum: 2)]\n");
	printf("\t[-y verify result if this switch is on]\n");
	printf("\t[-a tasks to allocate per core (default: same value as -q)]\n");
	printf("\t\tCan be used to spread operations across a wider range of memory.\n");
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
parse_args(int argc, char *argv)
|
|
|
|
{
|
2021-06-29 10:14:19 +00:00
|
|
|
int argval = 0;
|
2021-06-23 19:38:21 +00:00
|
|
|
|
|
|
|
switch (argc) {
|
2021-06-21 17:38:43 +00:00
|
|
|
case 'a':
|
2021-06-23 19:38:21 +00:00
|
|
|
case 'C':
|
|
|
|
case 'f':
|
|
|
|
case 'T':
|
|
|
|
case 'o':
|
|
|
|
case 'P':
|
|
|
|
case 'q':
|
|
|
|
case 's':
|
|
|
|
case 't':
|
2022-10-05 10:42:58 +00:00
|
|
|
case 'x':
|
2021-06-23 19:38:21 +00:00
|
|
|
argval = spdk_strtol(optarg, 10);
|
|
|
|
if (argval < 0) {
|
|
|
|
fprintf(stderr, "-%c option must be non-negative.\n", argc);
|
|
|
|
usage();
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
switch (argc) {
|
2021-06-21 17:38:43 +00:00
|
|
|
case 'a':
|
|
|
|
g_allocate_depth = argval;
|
|
|
|
break;
|
2020-12-21 12:17:06 +00:00
|
|
|
case 'C':
|
2022-09-22 19:01:56 +00:00
|
|
|
g_chained_count = argval;
|
2020-12-21 12:17:06 +00:00
|
|
|
break;
|
2022-06-22 21:35:57 +00:00
|
|
|
case 'l':
|
|
|
|
g_cd_file_in_name = optarg;
|
|
|
|
break;
|
2020-05-01 22:04:27 +00:00
|
|
|
case 'f':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_fill_pattern = (uint8_t)argval;
|
2020-05-01 22:04:27 +00:00
|
|
|
break;
|
2021-02-09 00:00:37 +00:00
|
|
|
case 'T':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_threads_per_core = argval;
|
2021-02-09 00:00:37 +00:00
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
case 'o':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_xfer_size_bytes = argval;
|
2020-02-13 22:13:53 +00:00
|
|
|
break;
|
2020-04-29 22:57:06 +00:00
|
|
|
case 'P':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_fail_percent_goal = argval;
|
2020-04-29 22:57:06 +00:00
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
case 'q':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_queue_depth = argval;
|
2020-02-13 22:13:53 +00:00
|
|
|
break;
|
2020-04-28 22:23:19 +00:00
|
|
|
case 's':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_crc32c_seed = argval;
|
2020-04-28 22:23:19 +00:00
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
case 't':
|
2021-06-23 19:38:21 +00:00
|
|
|
g_time_in_sec = argval;
|
2020-02-13 22:13:53 +00:00
|
|
|
break;
|
2022-10-05 10:42:58 +00:00
|
|
|
case 'x':
|
|
|
|
g_xor_src_count = argval;
|
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
case 'y':
|
|
|
|
g_verify = true;
|
|
|
|
break;
|
2020-04-07 16:16:13 +00:00
|
|
|
case 'w':
|
|
|
|
g_workload_type = optarg;
|
2020-04-24 16:50:46 +00:00
|
|
|
if (!strcmp(g_workload_type, "copy")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_COPY;
|
2020-04-24 16:50:46 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "fill")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_FILL;
|
2020-04-28 22:23:19 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "crc32c")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_CRC32C;
|
2021-06-07 22:50:43 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "copy_crc32c")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_COPY_CRC32C;
|
2020-04-29 22:57:06 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "compare")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_COMPARE;
|
2020-04-30 22:09:17 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "dualcast")) {
|
2022-03-15 17:43:07 +00:00
|
|
|
g_workload_selection = ACCEL_OPC_DUALCAST;
|
2022-06-22 21:35:57 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "compress")) {
|
|
|
|
g_workload_selection = ACCEL_OPC_COMPRESS;
|
|
|
|
} else if (!strcmp(g_workload_type, "decompress")) {
|
|
|
|
g_workload_selection = ACCEL_OPC_DECOMPRESS;
|
2022-10-05 10:42:58 +00:00
|
|
|
} else if (!strcmp(g_workload_type, "xor")) {
|
|
|
|
g_workload_selection = ACCEL_OPC_XOR;
|
2022-06-07 19:25:02 +00:00
|
|
|
} else {
|
|
|
|
usage();
|
|
|
|
return 1;
|
2020-04-24 16:50:46 +00:00
|
|
|
}
|
2020-04-07 16:16:13 +00:00
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
default:
|
|
|
|
usage();
|
|
|
|
return 1;
|
|
|
|
}
|
2020-12-21 12:17:06 +00:00
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-02-07 18:46:50 +00:00
|
|
|
static int dump_result(void);
|
2020-02-13 22:13:53 +00:00
|
|
|
/* Per-worker teardown, run on the worker's own SPDK thread. Snapshots the
 * worker's opcode stats (read later by dump_result()), frees its task pool,
 * releases its channel, and exits the thread. The last worker to finish
 * collects the results and stops the app. */
static void
unregister_worker(void *arg1)
{
	struct worker_thread *worker = arg1;

	/* Capture stats before the channel is released. */
	spdk_accel_get_opcode_stats(worker->ch, worker->workload,
				    &worker->stats, sizeof(worker->stats));
	free(worker->task_base);
	spdk_put_io_channel(worker->ch);
	spdk_thread_exit(spdk_get_thread());
	pthread_mutex_lock(&g_workers_lock);
	assert(g_num_workers >= 1);
	if (--g_num_workers == 0) {
		/* Last worker: drop the lock before the (potentially slow)
		 * result dump, then stop the application. */
		pthread_mutex_unlock(&g_workers_lock);
		g_rc = dump_result();
		spdk_app_stop(0);
	} else {
		pthread_mutex_unlock(&g_workers_lock);
	}
}
|
|
|
|
|
2022-06-22 21:35:57 +00:00
|
|
|
/* Split the sz-byte buffer buf into iovcnt contiguous iovec entries of
 * roughly equal size (the final entry absorbs any shortfall). Requires
 * sz >= iovcnt so every entry gets at least one byte. */
static void
accel_perf_construct_iovs(void *buf, uint64_t sz, struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t chunk = spdk_divide_round_up(sz, iovcnt);
	uint64_t remaining = sz;
	uint8_t *cursor = buf;
	uint32_t idx;

	for (idx = 0; idx < iovcnt; idx++) {
		uint64_t len = spdk_min(chunk, remaining);

		assert(len > 0);
		iovs[idx].iov_base = cursor;
		iovs[idx].iov_len = len;
		cursor += len;
		remaining -= len;
	}

	/* Every byte of buf must have been assigned to some iovec. */
	assert(remaining == 0);
}
|
|
|
|
|
2020-12-08 19:14:10 +00:00
|
|
|
static int
|
|
|
|
_get_task_data_bufs(struct ap_task *task)
|
|
|
|
{
|
|
|
|
uint32_t align = 0;
|
2020-12-21 12:17:06 +00:00
|
|
|
uint32_t i = 0;
|
2021-06-07 22:50:43 +00:00
|
|
|
int dst_buff_len = g_xfer_size_bytes;
|
2020-12-08 19:14:10 +00:00
|
|
|
|
|
|
|
/* For dualcast, the DSA HW requires 4K alignment on destination addresses but
|
2022-08-08 21:43:24 +00:00
|
|
|
* we do this for all modules to keep it simple.
|
2020-12-08 19:14:10 +00:00
|
|
|
*/
|
2022-03-15 17:43:07 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_DUALCAST) {
|
2020-12-08 19:14:10 +00:00
|
|
|
align = ALIGN_4K;
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:57 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_COMPRESS ||
|
|
|
|
g_workload_selection == ACCEL_OPC_DECOMPRESS) {
|
|
|
|
task->cur_seg = STAILQ_FIRST(&g_compress_segs);
|
2023-01-01 16:15:34 +00:00
|
|
|
|
|
|
|
if (g_workload_selection == ACCEL_OPC_COMPRESS) {
|
|
|
|
dst_buff_len = task->cur_seg->compressed_len_padded;
|
|
|
|
}
|
|
|
|
|
|
|
|
task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
|
|
|
|
if (task->dst == NULL) {
|
|
|
|
fprintf(stderr, "Unable to alloc dst buffer\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
task->dst_iovs = calloc(g_chained_count, sizeof(struct iovec));
|
|
|
|
if (!task->dst_iovs) {
|
|
|
|
fprintf(stderr, "cannot allocate task->dst_iovs for task=%p\n", task);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
task->dst_iovcnt = g_chained_count;
|
|
|
|
accel_perf_construct_iovs(task->dst, dst_buff_len, task->dst_iovs, task->dst_iovcnt);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_workload_selection == ACCEL_OPC_CRC32C ||
|
|
|
|
g_workload_selection == ACCEL_OPC_COPY_CRC32C) {
|
2022-09-22 19:01:56 +00:00
|
|
|
assert(g_chained_count > 0);
|
|
|
|
task->src_iovcnt = g_chained_count;
|
2022-07-21 21:57:11 +00:00
|
|
|
task->src_iovs = calloc(task->src_iovcnt, sizeof(struct iovec));
|
|
|
|
if (!task->src_iovs) {
|
|
|
|
fprintf(stderr, "cannot allocated task->src_iovs fot task=%p\n", task);
|
2020-12-21 12:17:06 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2022-03-15 17:43:07 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_COPY_CRC32C) {
|
2022-09-22 19:01:56 +00:00
|
|
|
dst_buff_len = g_xfer_size_bytes * g_chained_count;
|
2021-06-07 22:50:43 +00:00
|
|
|
}
|
|
|
|
|
2022-07-21 21:57:11 +00:00
|
|
|
for (i = 0; i < task->src_iovcnt; i++) {
|
|
|
|
task->src_iovs[i].iov_base = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
|
|
|
|
if (task->src_iovs[i].iov_base == NULL) {
|
2020-12-21 12:17:06 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2022-07-21 21:57:11 +00:00
|
|
|
memset(task->src_iovs[i].iov_base, DATA_PATTERN, g_xfer_size_bytes);
|
|
|
|
task->src_iovs[i].iov_len = g_xfer_size_bytes;
|
2020-12-21 12:17:06 +00:00
|
|
|
}
|
2022-10-05 10:42:58 +00:00
|
|
|
} else if (g_workload_selection == ACCEL_OPC_XOR) {
|
|
|
|
assert(g_xor_src_count > 1);
|
|
|
|
task->sources = calloc(g_xor_src_count, sizeof(*task->sources));
|
|
|
|
if (!task->sources) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2020-12-21 12:17:06 +00:00
|
|
|
|
2022-10-05 10:42:58 +00:00
|
|
|
for (i = 0; i < g_xor_src_count; i++) {
|
|
|
|
task->sources[i] = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
|
|
|
|
if (!task->sources[i]) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
memset(task->sources[i], DATA_PATTERN, g_xfer_size_bytes);
|
|
|
|
}
|
2020-12-21 12:17:06 +00:00
|
|
|
} else {
|
|
|
|
task->src = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
|
|
|
|
if (task->src == NULL) {
|
|
|
|
fprintf(stderr, "Unable to alloc src buffer\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For fill, set the entire src buffer so we can check if verify is enabled. */
|
2022-03-15 17:43:07 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_FILL) {
|
2020-12-21 12:17:06 +00:00
|
|
|
memset(task->src, g_fill_pattern, g_xfer_size_bytes);
|
|
|
|
} else {
|
|
|
|
memset(task->src, DATA_PATTERN, g_xfer_size_bytes);
|
|
|
|
}
|
2020-12-08 19:14:10 +00:00
|
|
|
}
|
|
|
|
|
2022-03-15 17:43:07 +00:00
|
|
|
if (g_workload_selection != ACCEL_OPC_CRC32C) {
|
2021-06-07 22:50:43 +00:00
|
|
|
task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
|
|
|
|
if (task->dst == NULL) {
|
|
|
|
fprintf(stderr, "Unable to alloc dst buffer\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2020-12-08 19:14:10 +00:00
|
|
|
|
2021-06-07 22:50:43 +00:00
|
|
|
/* For compare we want the buffers to match, otherwise not. */
|
2022-03-15 17:43:07 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_COMPARE) {
|
2021-06-07 22:50:43 +00:00
|
|
|
memset(task->dst, DATA_PATTERN, dst_buff_len);
|
|
|
|
} else {
|
|
|
|
memset(task->dst, ~DATA_PATTERN, dst_buff_len);
|
|
|
|
}
|
2020-12-08 19:14:10 +00:00
|
|
|
}
|
|
|
|
|
2022-06-22 19:53:08 +00:00
|
|
|
/* For dualcast 2 buffers are needed for the operation. */
|
2022-10-05 10:42:58 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_DUALCAST ||
|
|
|
|
(g_workload_selection == ACCEL_OPC_XOR && g_verify)) {
|
2020-12-08 19:14:10 +00:00
|
|
|
task->dst2 = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
|
|
|
|
if (task->dst2 == NULL) {
|
|
|
|
fprintf(stderr, "Unable to alloc dst buffer\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2022-06-22 19:53:08 +00:00
|
|
|
memset(task->dst2, ~DATA_PATTERN, g_xfer_size_bytes);
|
2020-12-08 19:14:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-08 16:59:17 +00:00
|
|
|
inline static struct ap_task *
|
|
|
|
_get_task(struct worker_thread *worker)
|
|
|
|
{
|
|
|
|
struct ap_task *task;
|
|
|
|
|
|
|
|
if (!TAILQ_EMPTY(&worker->tasks_pool)) {
|
|
|
|
task = TAILQ_FIRST(&worker->tasks_pool);
|
|
|
|
TAILQ_REMOVE(&worker->tasks_pool, task, link);
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, "Unable to get ap_task\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return task;
|
|
|
|
}
|
|
|
|
|
2020-12-08 17:12:31 +00:00
|
|
|
/* Submit one operation using the same ap task that just completed. */
static void
_submit_single(struct worker_thread *worker, struct ap_task *task)
{
	int random_num;
	int rc = 0;
	int flags = 0;

	assert(worker);

	/* Dispatch on the worker's configured opcode; each case issues one
	 * async accel operation that completes via accel_done(). */
	switch (worker->workload) {
	case ACCEL_OPC_COPY:
		rc = spdk_accel_submit_copy(worker->ch, task->dst, task->src,
					    g_xfer_size_bytes, flags, accel_done, task);
		break;
	case ACCEL_OPC_FILL:
		/* For fill use the first byte of the task->dst buffer */
		rc = spdk_accel_submit_fill(worker->ch, task->dst, *(uint8_t *)task->src,
					    g_xfer_size_bytes, flags, accel_done, task);
		break;
	case ACCEL_OPC_CRC32C:
		rc = spdk_accel_submit_crc32cv(worker->ch, &task->crc_dst,
					       task->src_iovs, task->src_iovcnt, g_crc32c_seed,
					       accel_done, task);
		break;
	case ACCEL_OPC_COPY_CRC32C:
		rc = spdk_accel_submit_copy_crc32cv(worker->ch, task->dst, task->src_iovs, task->src_iovcnt,
						    &task->crc_dst, g_crc32c_seed, flags, accel_done, task);
		break;
	case ACCEL_OPC_COMPARE:
		/* Deliberately corrupt the first destination byte on a
		 * g_fail_percent_goal fraction of submissions so the compare
		 * fails; accel_done() counts these as injected miscompares. */
		random_num = rand() % 100;
		if (random_num < g_fail_percent_goal) {
			task->expected_status = -EILSEQ;
			*(uint8_t *)task->dst = ~DATA_PATTERN;
		} else {
			task->expected_status = 0;
			*(uint8_t *)task->dst = DATA_PATTERN;
		}
		rc = spdk_accel_submit_compare(worker->ch, task->dst, task->src,
					       g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_OPC_DUALCAST:
		rc = spdk_accel_submit_dualcast(worker->ch, task->dst, task->dst2,
						task->src, g_xfer_size_bytes, flags, accel_done, task);
		break;
	case ACCEL_OPC_COMPRESS:
		/* Source comes from the current pre-built segment. */
		task->src_iovs = task->cur_seg->uncompressed_iovs;
		task->src_iovcnt = task->cur_seg->uncompressed_iovcnt;
		rc = spdk_accel_submit_compress(worker->ch, task->dst, task->cur_seg->compressed_len_padded,
						task->src_iovs,
						task->src_iovcnt, &task->compressed_sz, flags, accel_done, task);
		break;
	case ACCEL_OPC_DECOMPRESS:
		task->src_iovs = task->cur_seg->compressed_iovs;
		task->src_iovcnt = task->cur_seg->compressed_iovcnt;
		rc = spdk_accel_submit_decompress(worker->ch, task->dst_iovs, task->dst_iovcnt, task->src_iovs,
						  task->src_iovcnt, NULL, flags, accel_done, task);
		break;
	case ACCEL_OPC_XOR:
		rc = spdk_accel_submit_xor(worker->ch, task->dst, task->sources, g_xor_src_count,
					   g_xfer_size_bytes, accel_done, task);
		break;
	default:
		assert(false);
		break;

	}

	/* Count the op as outstanding before checking rc: on submit failure
	 * accel_done() runs inline below and decrements it again. */
	worker->current_queue_depth++;
	if (rc) {
		accel_done(task, rc);
	}
}
|
|
|
|
|
2020-12-08 16:59:17 +00:00
|
|
|
static void
|
2021-01-28 11:40:37 +00:00
|
|
|
_free_task_buffers(struct ap_task *task)
|
2020-12-08 16:59:17 +00:00
|
|
|
{
|
2020-12-21 12:17:06 +00:00
|
|
|
uint32_t i;
|
|
|
|
|
2023-01-01 16:15:34 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_DECOMPRESS || g_workload_selection == ACCEL_OPC_COMPRESS) {
|
2022-06-22 21:35:57 +00:00
|
|
|
free(task->dst_iovs);
|
|
|
|
} else if (g_workload_selection == ACCEL_OPC_CRC32C ||
|
|
|
|
g_workload_selection == ACCEL_OPC_COPY_CRC32C) {
|
2022-07-21 21:57:11 +00:00
|
|
|
if (task->src_iovs) {
|
|
|
|
for (i = 0; i < task->src_iovcnt; i++) {
|
|
|
|
if (task->src_iovs[i].iov_base) {
|
|
|
|
spdk_dma_free(task->src_iovs[i].iov_base);
|
2020-12-21 12:17:06 +00:00
|
|
|
}
|
|
|
|
}
|
2022-07-21 21:57:11 +00:00
|
|
|
free(task->src_iovs);
|
2020-12-21 12:17:06 +00:00
|
|
|
}
|
2022-10-05 10:42:58 +00:00
|
|
|
} else if (g_workload_selection == ACCEL_OPC_XOR) {
|
|
|
|
if (task->sources) {
|
|
|
|
for (i = 0; i < g_xor_src_count; i++) {
|
|
|
|
spdk_dma_free(task->sources[i]);
|
|
|
|
}
|
|
|
|
free(task->sources);
|
|
|
|
}
|
2020-12-21 12:17:06 +00:00
|
|
|
} else {
|
|
|
|
spdk_dma_free(task->src);
|
|
|
|
}
|
|
|
|
|
2020-12-08 16:59:17 +00:00
|
|
|
spdk_dma_free(task->dst);
|
2022-10-05 10:42:58 +00:00
|
|
|
if (g_workload_selection == ACCEL_OPC_DUALCAST || g_workload_selection == ACCEL_OPC_XOR) {
|
2020-12-08 16:59:17 +00:00
|
|
|
spdk_dma_free(task->dst2);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-07 22:50:43 +00:00
|
|
|
static int
|
2022-07-21 21:57:11 +00:00
|
|
|
_vector_memcmp(void *_dst, struct iovec *src_src_iovs, uint32_t iovcnt)
|
2021-06-07 22:50:43 +00:00
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
uint32_t ttl_len = 0;
|
|
|
|
uint8_t *dst = (uint8_t *)_dst;
|
|
|
|
|
|
|
|
for (i = 0; i < iovcnt; i++) {
|
2022-07-21 21:57:11 +00:00
|
|
|
if (memcmp(dst, src_src_iovs[i].iov_base, src_src_iovs[i].iov_len)) {
|
2021-06-07 22:50:43 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2022-07-21 21:57:11 +00:00
|
|
|
dst += src_src_iovs[i].iov_len;
|
|
|
|
ttl_len += src_src_iovs[i].iov_len;
|
2021-06-07 22:50:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ttl_len != iovcnt * g_xfer_size_bytes) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-04-19 12:35:39 +00:00
|
|
|
static int _worker_stop(void *arg);
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
static void
|
2021-08-13 06:12:47 +00:00
|
|
|
accel_done(void *arg1, int status)
|
2020-02-13 22:13:53 +00:00
|
|
|
{
|
|
|
|
struct ap_task *task = arg1;
|
|
|
|
struct worker_thread *worker = task->worker;
|
2020-04-28 22:23:19 +00:00
|
|
|
uint32_t sw_crc32c;
|
2020-02-13 22:13:53 +00:00
|
|
|
|
|
|
|
assert(worker);
|
|
|
|
assert(worker->current_queue_depth > 0);
|
|
|
|
|
2021-08-13 06:12:47 +00:00
|
|
|
if (g_verify && status == 0) {
|
2022-04-19 12:35:39 +00:00
|
|
|
switch (worker->workload) {
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_COPY_CRC32C:
|
2022-07-21 21:57:11 +00:00
|
|
|
sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
|
2021-06-07 22:50:43 +00:00
|
|
|
if (task->crc_dst != sw_crc32c) {
|
|
|
|
SPDK_NOTICELOG("CRC-32C miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
2022-07-21 21:57:11 +00:00
|
|
|
if (_vector_memcmp(task->dst, task->src_iovs, task->src_iovcnt)) {
|
2021-06-07 22:50:43 +00:00
|
|
|
SPDK_NOTICELOG("Data miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_CRC32C:
|
2022-07-21 21:57:11 +00:00
|
|
|
sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
|
2021-06-09 19:23:13 +00:00
|
|
|
if (task->crc_dst != sw_crc32c) {
|
2020-04-28 22:23:19 +00:00
|
|
|
SPDK_NOTICELOG("CRC-32C miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
2020-04-29 22:57:06 +00:00
|
|
|
break;
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_COPY:
|
2020-04-29 22:57:06 +00:00
|
|
|
if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_DUALCAST:
|
2020-04-30 22:09:17 +00:00
|
|
|
if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare, first destination\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
if (memcmp(task->src, task->dst2, g_xfer_size_bytes)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare, second destination\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_FILL:
|
2020-06-18 19:56:01 +00:00
|
|
|
if (memcmp(task->dst, task->src, g_xfer_size_bytes)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2022-03-15 17:43:07 +00:00
|
|
|
case ACCEL_OPC_COMPARE:
|
2020-06-18 22:46:27 +00:00
|
|
|
break;
|
2022-06-22 21:35:57 +00:00
|
|
|
case ACCEL_OPC_COMPRESS:
|
|
|
|
break;
|
|
|
|
case ACCEL_OPC_DECOMPRESS:
|
|
|
|
if (memcmp(task->dst, task->cur_seg->uncompressed_data, task->cur_seg->uncompressed_len)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare on decompression\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2022-10-05 10:42:58 +00:00
|
|
|
case ACCEL_OPC_XOR:
|
|
|
|
if (spdk_xor_gen(task->dst2, task->sources, g_xor_src_count,
|
|
|
|
g_xfer_size_bytes) != 0) {
|
|
|
|
SPDK_ERRLOG("Failed to generate xor for verification\n");
|
|
|
|
} else if (memcmp(task->dst, task->dst2, g_xfer_size_bytes)) {
|
|
|
|
SPDK_NOTICELOG("Data miscompare\n");
|
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
break;
|
2020-04-29 22:57:06 +00:00
|
|
|
default:
|
|
|
|
assert(false);
|
|
|
|
break;
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-29 22:57:06 +00:00
|
|
|
|
2022-06-22 21:35:57 +00:00
|
|
|
if (worker->workload == ACCEL_OPC_COMPRESS || g_workload_selection == ACCEL_OPC_DECOMPRESS) {
|
|
|
|
/* Advance the task to the next segment */
|
|
|
|
task->cur_seg = STAILQ_NEXT(task->cur_seg, link);
|
|
|
|
if (task->cur_seg == NULL) {
|
|
|
|
task->cur_seg = STAILQ_FIRST(&g_compress_segs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-29 22:57:06 +00:00
|
|
|
if (task->expected_status == -EILSEQ) {
|
2021-08-13 06:12:47 +00:00
|
|
|
assert(status != 0);
|
2020-04-29 22:57:06 +00:00
|
|
|
worker->injected_miscompares++;
|
2022-04-19 12:35:39 +00:00
|
|
|
status = 0;
|
2021-08-13 06:12:47 +00:00
|
|
|
} else if (status) {
|
2022-08-08 21:43:24 +00:00
|
|
|
/* Expected to pass but the accel module reported an error (ex: COMPARE operation). */
|
2020-04-29 22:57:06 +00:00
|
|
|
worker->xfer_failed++;
|
|
|
|
}
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
worker->current_queue_depth--;
|
|
|
|
|
2022-04-19 12:35:39 +00:00
|
|
|
if (!worker->is_draining && status == 0) {
|
2021-06-21 18:06:28 +00:00
|
|
|
TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
|
|
|
|
task = _get_task(worker);
|
2021-10-12 22:05:31 +00:00
|
|
|
_submit_single(worker, task);
|
2021-01-26 20:06:25 +00:00
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dump_result(void)
|
|
|
|
{
|
|
|
|
uint64_t total_completed = 0;
|
|
|
|
uint64_t total_failed = 0;
|
2020-04-29 22:57:06 +00:00
|
|
|
uint64_t total_miscompared = 0;
|
2020-02-13 22:13:53 +00:00
|
|
|
uint64_t total_xfer_per_sec, total_bw_in_MiBps;
|
|
|
|
struct worker_thread *worker = g_workers;
|
|
|
|
|
2021-02-09 00:00:37 +00:00
|
|
|
printf("\nCore,Thread Transfers Bandwidth Failed Miscompares\n");
|
|
|
|
printf("------------------------------------------------------------------------\n");
|
2020-02-13 22:13:53 +00:00
|
|
|
while (worker != NULL) {
|
|
|
|
|
2023-04-18 08:12:16 +00:00
|
|
|
uint64_t xfer_per_sec = worker->stats.executed / g_time_in_sec;
|
|
|
|
uint64_t bw_in_MiBps = worker->stats.num_bytes /
|
2020-02-13 22:13:53 +00:00
|
|
|
(g_time_in_sec * 1024 * 1024);
|
|
|
|
|
2023-04-18 08:12:16 +00:00
|
|
|
total_completed += worker->stats.executed;
|
2020-02-13 22:13:53 +00:00
|
|
|
total_failed += worker->xfer_failed;
|
2020-04-29 22:57:06 +00:00
|
|
|
total_miscompared += worker->injected_miscompares;
|
2020-02-13 22:13:53 +00:00
|
|
|
|
|
|
|
if (xfer_per_sec) {
|
2021-02-09 00:00:37 +00:00
|
|
|
printf("%u,%u%17" PRIu64 "/s%9" PRIu64 " MiB/s%7" PRIu64 " %11" PRIu64 "\n",
|
|
|
|
worker->display.core, worker->display.thread, xfer_per_sec,
|
2020-04-29 22:57:06 +00:00
|
|
|
bw_in_MiBps, worker->xfer_failed, worker->injected_miscompares);
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
worker = worker->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
total_xfer_per_sec = total_completed / g_time_in_sec;
|
|
|
|
total_bw_in_MiBps = (total_completed * g_xfer_size_bytes) /
|
|
|
|
(g_time_in_sec * 1024 * 1024);
|
|
|
|
|
2021-02-09 00:00:37 +00:00
|
|
|
printf("=========================================================================\n");
|
|
|
|
printf("Total:%15" PRIu64 "/s%9" PRIu64 " MiB/s%6" PRIu64 " %11" PRIu64"\n\n",
|
2020-04-29 22:57:06 +00:00
|
|
|
total_xfer_per_sec, total_bw_in_MiBps, total_failed, total_miscompared);
|
2020-02-13 22:13:53 +00:00
|
|
|
|
|
|
|
return total_failed ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2021-01-28 11:40:37 +00:00
|
|
|
static inline void
|
|
|
|
_free_task_buffers_in_pool(struct worker_thread *worker)
|
|
|
|
{
|
|
|
|
struct ap_task *task;
|
|
|
|
|
|
|
|
assert(worker);
|
|
|
|
while ((task = TAILQ_FIRST(&worker->tasks_pool))) {
|
|
|
|
TAILQ_REMOVE(&worker->tasks_pool, task, link);
|
|
|
|
_free_task_buffers(task);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
static int
|
|
|
|
_check_draining(void *arg)
|
|
|
|
{
|
|
|
|
struct worker_thread *worker = arg;
|
|
|
|
|
|
|
|
assert(worker);
|
|
|
|
|
|
|
|
if (worker->current_queue_depth == 0) {
|
2021-01-28 11:40:37 +00:00
|
|
|
_free_task_buffers_in_pool(worker);
|
2020-02-13 22:13:53 +00:00
|
|
|
spdk_poller_unregister(&worker->is_draining_poller);
|
|
|
|
unregister_worker(worker);
|
|
|
|
}
|
|
|
|
|
2021-07-01 18:45:51 +00:00
|
|
|
return SPDK_POLLER_BUSY;
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
_worker_stop(void *arg)
|
|
|
|
{
|
|
|
|
struct worker_thread *worker = arg;
|
|
|
|
|
|
|
|
assert(worker);
|
|
|
|
|
|
|
|
spdk_poller_unregister(&worker->stop_poller);
|
|
|
|
|
|
|
|
/* now let the worker drain and check it's outstanding IO with a poller */
|
|
|
|
worker->is_draining = true;
|
2020-04-14 06:49:46 +00:00
|
|
|
worker->is_draining_poller = SPDK_POLLER_REGISTER(_check_draining, worker, 0);
|
2020-02-13 22:13:53 +00:00
|
|
|
|
2021-07-01 18:45:51 +00:00
|
|
|
return SPDK_POLLER_BUSY;
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Per-SPDK-thread initializer, delivered via spdk_thread_send_msg() from
 * accel_perf_start().  Allocates the worker, links it into the global
 * worker list, allocates g_allocate_depth tasks with their data buffers,
 * arms the run-time stop poller, and primes the queue with g_queue_depth
 * submissions.  arg1 is a heap-allocated struct display_info that this
 * function takes ownership of (freed here on every path).
 *
 * NOTE(review): on the error path the worker stays linked in g_workers so
 * main() frees the struct itself; only the task buffers and task array are
 * released here.  The accel channel (worker->ch) is not released on error —
 * presumably acceptable because spdk_app_stop() tears the app down; confirm.
 */
static void
_init_thread(void *arg1)
{
	struct worker_thread *worker;
	struct ap_task *task;
	int i, num_tasks = g_allocate_depth;
	struct display_info *display = arg1;

	worker = calloc(1, sizeof(*worker));
	if (worker == NULL) {
		fprintf(stderr, "Unable to allocate worker\n");
		free(display);
		return;
	}

	/* Record which workload/core/thread this worker runs for reporting. */
	worker->workload = g_workload_selection;
	worker->display.core = display->core;
	worker->display.thread = display->thread;
	free(display);
	worker->core = spdk_env_get_current_core();
	worker->thread = spdk_get_thread();
	/* Publish the worker on the global singly-linked list under the lock. */
	pthread_mutex_lock(&g_workers_lock);
	g_num_workers++;
	worker->next = g_workers;
	g_workers = worker;
	pthread_mutex_unlock(&g_workers_lock);
	worker->ch = spdk_accel_get_io_channel();
	if (worker->ch == NULL) {
		fprintf(stderr, "Unable to get an accel channel\n");
		goto error;
	}

	TAILQ_INIT(&worker->tasks_pool);

	/* One contiguous allocation backs all of this worker's tasks. */
	worker->task_base = calloc(num_tasks, sizeof(struct ap_task));
	if (worker->task_base == NULL) {
		fprintf(stderr, "Could not allocate task base.\n");
		goto error;
	}

	/* Thread every task onto the free pool and give it data buffers. */
	task = worker->task_base;
	for (i = 0; i < num_tasks; i++) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		task->worker = worker;
		if (_get_task_data_bufs(task)) {
			fprintf(stderr, "Unable to get data bufs\n");
			goto error;
		}
		task++;
	}

	/* Register a poller that will stop the worker at time elapsed */
	worker->stop_poller = SPDK_POLLER_REGISTER(_worker_stop, worker,
			      g_time_in_sec * 1000000ULL);

	/* Load up queue depth worth of operations. */
	for (i = 0; i < g_queue_depth; i++) {
		task = _get_task(worker);
		if (task == NULL) {
			goto error;
		}

		_submit_single(worker, task);
	}
	return;
error:

	_free_task_buffers_in_pool(worker);
	free(worker->task_base);
	spdk_app_stop(-1);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
accel_perf_start(void *arg1)
|
|
|
|
{
|
2021-02-07 18:46:50 +00:00
|
|
|
struct spdk_cpuset tmp_cpumask = {};
|
|
|
|
char thread_name[32];
|
|
|
|
uint32_t i;
|
2021-02-09 00:00:37 +00:00
|
|
|
int j;
|
2021-02-07 18:46:50 +00:00
|
|
|
struct spdk_thread *thread;
|
2021-02-09 00:00:37 +00:00
|
|
|
struct display_info *display;
|
2020-04-24 16:50:46 +00:00
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
g_tsc_rate = spdk_get_ticks_hz();
|
|
|
|
g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
|
|
|
|
|
2022-06-08 00:11:12 +00:00
|
|
|
dump_user_config();
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
printf("Running for %d seconds...\n", g_time_in_sec);
|
|
|
|
fflush(stdout);
|
|
|
|
|
2021-02-07 18:46:50 +00:00
|
|
|
/* Create worker threads for each core that was specified. */
|
|
|
|
SPDK_ENV_FOREACH_CORE(i) {
|
2021-02-09 00:00:37 +00:00
|
|
|
for (j = 0; j < g_threads_per_core; j++) {
|
|
|
|
snprintf(thread_name, sizeof(thread_name), "ap_worker_%u_%u", i, j);
|
|
|
|
spdk_cpuset_zero(&tmp_cpumask);
|
|
|
|
spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
|
|
|
|
thread = spdk_thread_create(thread_name, &tmp_cpumask);
|
|
|
|
display = calloc(1, sizeof(*display));
|
|
|
|
if (display == NULL) {
|
|
|
|
fprintf(stderr, "Unable to allocate memory\n");
|
|
|
|
spdk_app_stop(-1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
display->core = i;
|
|
|
|
display->thread = j;
|
|
|
|
spdk_thread_send_msg(thread, _init_thread, display);
|
|
|
|
}
|
2021-02-07 18:46:50 +00:00
|
|
|
}
|
2020-02-13 22:13:53 +00:00
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:57 +00:00
|
|
|
static void
|
|
|
|
accel_perf_free_compress_segs(void)
|
|
|
|
{
|
|
|
|
struct ap_compress_seg *seg, *tmp;
|
|
|
|
|
|
|
|
STAILQ_FOREACH_SAFE(seg, &g_compress_segs, link, tmp) {
|
|
|
|
free(seg->uncompressed_iovs);
|
|
|
|
free(seg->compressed_iovs);
|
|
|
|
spdk_dma_free(seg->compressed_data);
|
|
|
|
spdk_dma_free(seg->uncompressed_data);
|
|
|
|
STAILQ_REMOVE_HEAD(&g_compress_segs, link);
|
|
|
|
free(seg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Context threaded through the asynchronous "prep" phase that compresses the
 * input file into segments before the benchmark proper starts.
 */
struct accel_perf_prep_ctx {
	FILE *file;			/* input file named by g_cd_file_in_name */
	long remaining;			/* bytes of the file not yet segmented */
	struct spdk_io_channel *ch;	/* accel channel used for the prep compression pass */
	struct ap_compress_seg *cur_seg;	/* segment whose compression is in flight */
};
|
|
|
|
|
|
|
|
static void accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx);
|
|
|
|
|
|
|
|
static void
|
|
|
|
accel_perf_prep_process_seg_cpl(void *ref, int status)
|
|
|
|
{
|
|
|
|
struct accel_perf_prep_ctx *ctx = ref;
|
|
|
|
struct ap_compress_seg *seg;
|
|
|
|
|
|
|
|
if (status != 0) {
|
|
|
|
fprintf(stderr, "error (%d) on initial compress completion\n", status);
|
|
|
|
spdk_dma_free(ctx->cur_seg->compressed_data);
|
|
|
|
spdk_dma_free(ctx->cur_seg->uncompressed_data);
|
|
|
|
free(ctx->cur_seg);
|
|
|
|
spdk_put_io_channel(ctx->ch);
|
|
|
|
fclose(ctx->file);
|
|
|
|
free(ctx);
|
|
|
|
spdk_app_stop(-status);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
seg = ctx->cur_seg;
|
|
|
|
|
|
|
|
if (g_workload_selection == ACCEL_OPC_DECOMPRESS) {
|
|
|
|
seg->compressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
|
|
|
|
if (seg->compressed_iovs == NULL) {
|
|
|
|
fprintf(stderr, "unable to allocate iovec\n");
|
|
|
|
spdk_dma_free(seg->compressed_data);
|
|
|
|
spdk_dma_free(seg->uncompressed_data);
|
|
|
|
free(seg);
|
|
|
|
spdk_put_io_channel(ctx->ch);
|
|
|
|
fclose(ctx->file);
|
|
|
|
free(ctx);
|
|
|
|
spdk_app_stop(-ENOMEM);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
seg->compressed_iovcnt = g_chained_count;
|
|
|
|
|
|
|
|
accel_perf_construct_iovs(seg->compressed_data, seg->compressed_len, seg->compressed_iovs,
|
|
|
|
seg->compressed_iovcnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
STAILQ_INSERT_TAIL(&g_compress_segs, seg, link);
|
|
|
|
ctx->remaining -= seg->uncompressed_len;
|
|
|
|
|
|
|
|
accel_perf_prep_process_seg(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct ap_compress_seg *seg;
|
2023-01-01 16:15:34 +00:00
|
|
|
int sz, sz_read, sz_padded;
|
2022-06-22 21:35:57 +00:00
|
|
|
void *ubuf, *cbuf;
|
|
|
|
struct iovec iov[1];
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (ctx->remaining == 0) {
|
|
|
|
spdk_put_io_channel(ctx->ch);
|
|
|
|
fclose(ctx->file);
|
|
|
|
free(ctx);
|
|
|
|
accel_perf_start(NULL);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sz = spdk_min(ctx->remaining, g_xfer_size_bytes);
|
2023-01-01 16:15:34 +00:00
|
|
|
/* Add 10% pad to the compress buffer for incompressible data. Note that a real app
|
|
|
|
* would likely either deal with the failure of not having a large enough buffer
|
|
|
|
* by submitting another operation with a larger one. Or, like the vbdev module
|
|
|
|
* does, just accept the error and use the data uncompressed marking it as such in
|
|
|
|
* its own metadata so that in the future it doesn't try to decompress uncompressed
|
|
|
|
* data, etc.
|
|
|
|
*/
|
|
|
|
sz_padded = sz * COMP_BUF_PAD_PERCENTAGE;
|
2022-06-22 21:35:57 +00:00
|
|
|
|
|
|
|
ubuf = spdk_dma_zmalloc(sz, ALIGN_4K, NULL);
|
|
|
|
if (!ubuf) {
|
|
|
|
fprintf(stderr, "unable to allocate uncompress buffer\n");
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2023-01-01 16:15:34 +00:00
|
|
|
cbuf = spdk_dma_malloc(sz_padded, ALIGN_4K, NULL);
|
2022-06-22 21:35:57 +00:00
|
|
|
if (!cbuf) {
|
|
|
|
fprintf(stderr, "unable to allocate compress buffer\n");
|
|
|
|
rc = -ENOMEM;
|
|
|
|
spdk_dma_free(ubuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
seg = calloc(1, sizeof(*seg));
|
|
|
|
if (!seg) {
|
|
|
|
fprintf(stderr, "unable to allocate comp/decomp segment\n");
|
|
|
|
spdk_dma_free(ubuf);
|
|
|
|
spdk_dma_free(cbuf);
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
sz_read = fread(ubuf, sizeof(uint8_t), sz, ctx->file);
|
|
|
|
if (sz_read != sz) {
|
|
|
|
fprintf(stderr, "unable to read input file\n");
|
|
|
|
free(seg);
|
|
|
|
spdk_dma_free(ubuf);
|
|
|
|
spdk_dma_free(cbuf);
|
|
|
|
rc = -errno;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_workload_selection == ACCEL_OPC_COMPRESS) {
|
|
|
|
seg->uncompressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
|
|
|
|
if (seg->uncompressed_iovs == NULL) {
|
|
|
|
fprintf(stderr, "unable to allocate iovec\n");
|
|
|
|
free(seg);
|
|
|
|
spdk_dma_free(ubuf);
|
|
|
|
spdk_dma_free(cbuf);
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
seg->uncompressed_iovcnt = g_chained_count;
|
|
|
|
accel_perf_construct_iovs(ubuf, sz, seg->uncompressed_iovs, seg->uncompressed_iovcnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
seg->uncompressed_data = ubuf;
|
|
|
|
seg->uncompressed_len = sz;
|
|
|
|
seg->compressed_data = cbuf;
|
|
|
|
seg->compressed_len = sz;
|
2023-01-01 16:15:34 +00:00
|
|
|
seg->compressed_len_padded = sz_padded;
|
2022-06-22 21:35:57 +00:00
|
|
|
|
|
|
|
ctx->cur_seg = seg;
|
|
|
|
iov[0].iov_base = seg->uncompressed_data;
|
|
|
|
iov[0].iov_len = seg->uncompressed_len;
|
|
|
|
/* Note that anytime a call is made to spdk_accel_submit_compress() there's a chance
|
|
|
|
* it will fail with -ENOMEM in the event that the destination buffer is not large enough
|
2023-01-01 16:15:34 +00:00
|
|
|
* to hold the compressed data. This example app simply adds 10% buffer for compressed data
|
|
|
|
* but real applications may want to consider a more sophisticated method.
|
2022-06-22 21:35:57 +00:00
|
|
|
*/
|
2023-01-01 16:15:34 +00:00
|
|
|
rc = spdk_accel_submit_compress(ctx->ch, seg->compressed_data, seg->compressed_len_padded, iov, 1,
|
2022-06-22 21:35:57 +00:00
|
|
|
&seg->compressed_len, 0, accel_perf_prep_process_seg_cpl, ctx);
|
|
|
|
if (rc < 0) {
|
|
|
|
fprintf(stderr, "error (%d) on initial compress submission\n", rc);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
error:
|
|
|
|
spdk_put_io_channel(ctx->ch);
|
|
|
|
fclose(ctx->file);
|
|
|
|
free(ctx);
|
|
|
|
spdk_app_stop(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
accel_perf_prep(void *arg1)
|
|
|
|
{
|
|
|
|
struct accel_perf_prep_ctx *ctx;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
if (g_workload_selection != ACCEL_OPC_COMPRESS &&
|
|
|
|
g_workload_selection != ACCEL_OPC_DECOMPRESS) {
|
|
|
|
accel_perf_start(arg1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_cd_file_in_name == NULL) {
|
|
|
|
fprintf(stdout, "A filename is required.\n");
|
|
|
|
rc = -EINVAL;
|
|
|
|
goto error_end;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_workload_selection == ACCEL_OPC_COMPRESS && g_verify) {
|
|
|
|
fprintf(stdout, "\nCompression does not support the verify option, aborting.\n");
|
|
|
|
rc = -ENOTSUP;
|
|
|
|
goto error_end;
|
|
|
|
}
|
|
|
|
|
|
|
|
printf("Preparing input file...\n");
|
|
|
|
|
|
|
|
ctx = calloc(1, sizeof(*ctx));
|
|
|
|
if (ctx == NULL) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto error_end;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->file = fopen(g_cd_file_in_name, "r");
|
|
|
|
if (ctx->file == NULL) {
|
|
|
|
fprintf(stderr, "Could not open file %s.\n", g_cd_file_in_name);
|
|
|
|
rc = -errno;
|
|
|
|
goto error_ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
fseek(ctx->file, 0L, SEEK_END);
|
|
|
|
ctx->remaining = ftell(ctx->file);
|
|
|
|
fseek(ctx->file, 0L, SEEK_SET);
|
|
|
|
|
|
|
|
ctx->ch = spdk_accel_get_io_channel();
|
|
|
|
if (ctx->ch == NULL) {
|
|
|
|
rc = -EAGAIN;
|
|
|
|
goto error_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_xfer_size_bytes == 0) {
|
|
|
|
/* size of 0 means "file at a time" */
|
|
|
|
g_xfer_size_bytes = ctx->remaining;
|
|
|
|
}
|
|
|
|
|
|
|
|
accel_perf_prep_process_seg(ctx);
|
|
|
|
return;
|
|
|
|
|
|
|
|
error_file:
|
|
|
|
fclose(ctx->file);
|
|
|
|
error_ctx:
|
|
|
|
free(ctx);
|
|
|
|
error_end:
|
|
|
|
spdk_app_stop(rc);
|
|
|
|
}
|
|
|
|
|
2023-04-25 13:13:30 +00:00
|
|
|
/* Thread-message wrapper around _worker_stop(); the poller-style return
 * code is meaningless in this context and is discarded.
 */
static void
worker_shutdown(void *ctx)
{
	(void)_worker_stop(ctx);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
shutdown_cb(void)
|
|
|
|
{
|
|
|
|
struct worker_thread *worker;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&g_workers_lock);
|
|
|
|
worker = g_workers;
|
|
|
|
while (worker) {
|
|
|
|
spdk_thread_send_msg(worker->thread, worker_shutdown, worker);
|
|
|
|
worker = worker->next;
|
|
|
|
}
|
|
|
|
pthread_mutex_unlock(&g_workers_lock);
|
|
|
|
}
|
|
|
|
|
2020-02-13 22:13:53 +00:00
|
|
|
/*
 * Program entry point: parse options, validate the selected workload and
 * depth settings, run the SPDK app (accel_perf_prep is the app start
 * callback), then free the worker list and global resources.
 *
 * Note the cleanup ordering: the worker list is only walked after
 * spdk_app_start() has returned (all reactors stopped), and the
 * "cleanup" label is reached on option errors before any workers exist.
 */
int
main(int argc, char **argv)
{
	struct worker_thread *worker, *tmp;

	pthread_mutex_init(&g_workers_lock, NULL);
	spdk_app_opts_init(&g_opts, sizeof(g_opts));
	g_opts.name = "accel_perf";
	g_opts.reactor_mask = "0x1";
	g_opts.shutdown_cb = shutdown_cb;
	if (spdk_app_parse_args(argc, argv, &g_opts, "a:C:o:q:t:yw:P:f:T:l:x:", NULL, parse_args,
				usage) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		g_rc = -1;
		goto cleanup;
	}

	/* Reject any workload name that parse_args did not map to a
	 * supported accel opcode.
	 */
	if ((g_workload_selection != ACCEL_OPC_COPY) &&
	    (g_workload_selection != ACCEL_OPC_FILL) &&
	    (g_workload_selection != ACCEL_OPC_CRC32C) &&
	    (g_workload_selection != ACCEL_OPC_COPY_CRC32C) &&
	    (g_workload_selection != ACCEL_OPC_COMPARE) &&
	    (g_workload_selection != ACCEL_OPC_COMPRESS) &&
	    (g_workload_selection != ACCEL_OPC_DECOMPRESS) &&
	    (g_workload_selection != ACCEL_OPC_DUALCAST) &&
	    (g_workload_selection != ACCEL_OPC_XOR)) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	/* The per-worker task pool must be able to cover the queue depth. */
	if (g_allocate_depth > 0 && g_queue_depth > g_allocate_depth) {
		fprintf(stdout, "allocate depth must be at least as big as queue depth\n");
		usage();
		g_rc = -1;
		goto cleanup;
	}

	/* Unset allocate depth defaults to the queue depth. */
	if (g_allocate_depth == 0) {
		g_allocate_depth = g_queue_depth;
	}

	/* CRC32C-family workloads need at least one chained buffer. */
	if ((g_workload_selection == ACCEL_OPC_CRC32C || g_workload_selection == ACCEL_OPC_COPY_CRC32C) &&
	    g_chained_count == 0) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	/* XOR requires at least two source buffers. */
	if (g_workload_selection == ACCEL_OPC_XOR && g_xor_src_count < 2) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	/* Runs the reactors; returns when the app has been stopped. */
	g_rc = spdk_app_start(&g_opts, accel_perf_prep, NULL);
	if (g_rc) {
		SPDK_ERRLOG("ERROR starting application\n");
	}

	pthread_mutex_destroy(&g_workers_lock);

	/* Worker structs were allocated by _init_thread(); free them here. */
	worker = g_workers;
	while (worker) {
		tmp = worker->next;
		free(worker);
		worker = tmp;
	}
cleanup:
	accel_perf_free_compress_segs();
	spdk_app_fini();
	return g_rc;
}
|