modules/accel_dsa: update IDXD references to DSA where it makes sense

IDXD has always been used everywhere but technically it stands for
the driver, not the HW (Intel Data Streaming Accelerator Driver)
where the X apparently comes from "Streaming Accelerator".  Anyway, the
underlying hardware is just DSA.  It doesn't matter much now but
upcoming patches will add support for a new HW accelerator called
the Intel In-Memory Analytics Accelerator which we'll call IAA and
it will use (mostly) the same device driver (IDXD) as DSA.  So, calling
the HW what it is will lessen confusion when adding IAA support.

This patch just does renaming for the accel_fw module and associated
files (RPC, etc).

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: Ib3b1f982cc60359ecfea5dbcbeeb33e4d69aee6a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11984
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
paul luse 2022-03-18 07:29:39 -07:00 committed by Tomasz Zawadzki
parent 07e31b028a
commit ffef30ae0d
18 changed files with 138 additions and 131 deletions

View File

@ -32,7 +32,7 @@ in multipath mode.
A new option `disable_auto_failback` was added to the `bdev_nvme_set_options` RPC to disable
automatic failback.
### idxd
### idxd / dsa
A new parameter `flags` was added to all low level submission and preparation
APIs to enable the caller to pass operation flags per the DSA specification.
@ -40,6 +40,11 @@ APIs to enable the caller to pass operation flags per the DSA specification.
A new flag 'SPDK_IDXD_FLAG_PERSISTENT' was added to let DSA know that
the destination is persistent.
The RPC `idxd_scan_accel_engine` has been renamed to `dsa_scan_accel_engine`.
Many HW related structs/functions with the name `idxd` have been renamed `dsa`
to more accurately represent the HW they are associated with.
### accel_fw
A new parameter `flags` was added to accel API.

View File

@ -72,10 +72,12 @@ RPC is provided, the framework is available and will use the software plug-in mo
To use the IOAT engine, use the RPC [`ioat_scan_accel_engine`](https://spdk.io/doc/jsonrpc.html) before starting the application.
### IDXD Module {#accel_idxd}
### DSA Module {#accel_dsa}
The DSA module supports the DSA hardware and relies on the low level IDXD library.
To use the DSA engine, use the RPC
[`idxd_scan_accel_engine`](https://spdk.io/doc/jsonrpc.html). By default, this
[`dsa_scan_accel_engine`](https://spdk.io/doc/jsonrpc.html). By default, this
will attempt to load the SPDK user-space idxd driver. To use the built-in
kernel driver on Linux, add the `-k` parameter. See the next section for
details on using the kernel driver.

View File

@ -442,7 +442,7 @@ Example response:
"framework_monitor_context_switch",
"spdk_kill_instance",
"ioat_scan_accel_engine",
"idxd_scan_accel_engine",
"dsa_scan_accel_engine",
"bdev_virtio_attach_controller",
"bdev_virtio_scsi_get_devices",
"bdev_virtio_detach_controller",
@ -1493,9 +1493,9 @@ Example response:
## Acceleration Framework Layer {#jsonrpc_components_accel_fw}
### idxd_scan_accel_engine {#rpc_idxd_scan_accel_engine}
### dsa_scan_accel_engine {#rpc_dsa_scan_accel_engine}
Set config and enable idxd accel engine offload.
Set config and enable dsa accel engine offload.
This feature is considered as experimental.
#### Parameters
@ -1514,7 +1514,7 @@ Example request:
"config_kernel_mode": false
},
"jsonrpc": "2.0",
"method": "idxd_scan_accel_engine",
"method": "dsa_scan_accel_engine",
"id": 1
}
~~~

View File

@ -61,7 +61,7 @@
#define TRACE_GROUP_FTL 0x6
#define TRACE_GROUP_BLOBFS 0x7
#define TRACE_GROUP_NVMF_FC 0x8
#define TRACE_GROUP_IDXD 0x9
#define TRACE_GROUP_ACCEL_DSA 0x9
#define TRACE_GROUP_THREAD 0xA
#define TRACE_GROUP_NVME_PCIE 0xB
@ -166,7 +166,7 @@
#define TRACE_NVME_PCIE_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_NVME_PCIE, 0x1)
/* idxd trace definitions */
#define TRACE_IDXD_OP_SUBMIT SPDK_TPOINT_ID(TRACE_GROUP_IDXD, 0x0)
#define TRACE_IDXD_OP_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_IDXD, 0x1)
#define TRACE_ACCEL_DSA_OP_SUBMIT SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_DSA, 0x0)
#define TRACE_ACCEL_DSA_OP_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_DSA, 0x1)
#endif /* SPDK_INTERNAL_TRACE_DEFS */

View File

@ -119,7 +119,7 @@ endif
# module/accel
DEPDIRS-accel_ioat := log ioat thread jsonrpc rpc accel
DEPDIRS-accel_idxd := log idxd thread $(JSON_LIBS) accel trace
DEPDIRS-accel_dsa := log idxd thread $(JSON_LIBS) accel trace
# module/env_dpdk
DEPDIRS-env_dpdk_rpc := log $(JSON_LIBS)

View File

@ -118,7 +118,7 @@ endif
ACCEL_MODULES_LIST = accel_ioat ioat
ifeq ($(CONFIG_IDXD),y)
ACCEL_MODULES_LIST += accel_idxd idxd
ACCEL_MODULES_LIST += accel_dsa idxd
endif
SCHEDULER_MODULES_LIST = scheduler_dynamic

View File

@ -36,7 +36,7 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = ioat
DIRS-$(CONFIG_IDXD) += idxd
DIRS-$(CONFIG_IDXD) += dsa
.PHONY: all clean $(DIRS-y)

View File

@ -37,8 +37,8 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 3
SO_MINOR := 0
LIBNAME = accel_idxd
C_SRCS = accel_engine_idxd.c accel_engine_idxd_rpc.c
LIBNAME = accel_dsa
C_SRCS = accel_engine_dsa.c accel_engine_dsa_rpc.c
SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map

View File

@ -31,7 +31,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_idxd.h"
#include "accel_engine_dsa.h"
#include "spdk/stdinc.h"
@ -48,7 +48,7 @@
#include "spdk/trace.h"
#include "spdk_internal/trace_defs.h"
static bool g_idxd_enable = false;
static bool g_dsa_enable = false;
static bool g_kernel_mode = false;
enum channel_state {
@ -56,13 +56,13 @@ enum channel_state {
IDXD_CHANNEL_ERROR,
};
static bool g_idxd_initialized = false;
static bool g_dsa_initialized = false;
struct idxd_device {
struct spdk_idxd_device *idxd;
struct spdk_idxd_device *dsa;
TAILQ_ENTRY(idxd_device) tailq;
};
static TAILQ_HEAD(, idxd_device) g_idxd_devices = TAILQ_HEAD_INITIALIZER(g_idxd_devices);
static TAILQ_HEAD(, idxd_device) g_dsa_devices = TAILQ_HEAD_INITIALIZER(g_dsa_devices);
static struct idxd_device *g_next_dev = NULL;
static uint32_t g_num_devices = 0;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;
@ -76,7 +76,7 @@ struct idxd_io_channel {
TAILQ_HEAD(, spdk_accel_task) queued_tasks;
};
static struct spdk_io_channel *idxd_get_io_channel(void);
static struct spdk_io_channel *dsa_get_io_channel(void);
static struct idxd_device *
idxd_select_device(struct idxd_io_channel *chan)
@ -95,12 +95,12 @@ idxd_select_device(struct idxd_io_channel *chan)
pthread_mutex_lock(&g_dev_lock);
g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
if (g_next_dev == NULL) {
g_next_dev = TAILQ_FIRST(&g_idxd_devices);
g_next_dev = TAILQ_FIRST(&g_dsa_devices);
}
dev = g_next_dev;
pthread_mutex_unlock(&g_dev_lock);
if (socket_id != spdk_idxd_get_socket(dev->idxd)) {
if (socket_id != spdk_idxd_get_socket(dev->dsa)) {
continue;
}
@ -109,10 +109,10 @@ idxd_select_device(struct idxd_io_channel *chan)
* allow a specific number of channels to share a device
* to limit outstanding IO for flow control purposes.
*/
chan->chan = spdk_idxd_get_channel(dev->idxd);
chan->chan = spdk_idxd_get_channel(dev->dsa);
if (chan->chan != NULL) {
SPDK_DEBUGLOG(accel_idxd, "On socket %d using device on socket %d\n",
socket_id, spdk_idxd_get_socket(dev->idxd));
SPDK_DEBUGLOG(accel_dsa, "On socket %d using device on socket %d\n",
socket_id, spdk_idxd_get_socket(dev->dsa));
return dev;
}
} while (count++ < g_num_devices);
@ -127,7 +127,7 @@ idxd_select_device(struct idxd_io_channel *chan)
}
static void
idxd_done(void *cb_arg, int status)
dsa_done(void *cb_arg, int status)
{
struct spdk_accel_task *accel_task = cb_arg;
struct idxd_io_channel *chan;
@ -135,7 +135,7 @@ idxd_done(void *cb_arg, int status)
chan = spdk_io_channel_get_ctx(accel_task->accel_ch->engine_ch[accel_task->op_code]);
assert(chan->num_outstanding > 0);
spdk_trace_record(TRACE_IDXD_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
spdk_trace_record(TRACE_ACCEL_DSA_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
chan->num_outstanding--;
spdk_accel_task_complete(accel_task, status);
@ -162,7 +162,7 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
flags |= SPDK_IDXD_FLAG_PERSISTENT;
flags |= SPDK_IDXD_FLAG_NONTEMPORAL;
}
rc = spdk_idxd_submit_copy(chan->chan, &diov, 1, &siov, 1, flags, idxd_done, task);
rc = spdk_idxd_submit_copy(chan->chan, &diov, 1, &siov, 1, flags, dsa_done, task);
break;
case ACCEL_OPC_DUALCAST:
if (task->flags & ACCEL_FLAG_PERSISTENT) {
@ -170,14 +170,14 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
flags |= SPDK_IDXD_FLAG_NONTEMPORAL;
}
rc = spdk_idxd_submit_dualcast(chan->chan, task->dst, task->dst2, task->src, task->nbytes,
flags, idxd_done, task);
flags, dsa_done, task);
break;
case ACCEL_OPC_COMPARE:
siov.iov_base = task->src;
siov.iov_len = task->nbytes;
diov.iov_base = task->dst;
diov.iov_len = task->nbytes;
rc = spdk_idxd_submit_compare(chan->chan, &siov, 1, &diov, 1, flags, idxd_done, task);
rc = spdk_idxd_submit_compare(chan->chan, &siov, 1, &diov, 1, flags, dsa_done, task);
break;
case ACCEL_OPC_FILL:
diov.iov_base = task->dst;
@ -186,7 +186,7 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
flags |= SPDK_IDXD_FLAG_PERSISTENT;
flags |= SPDK_IDXD_FLAG_NONTEMPORAL;
}
rc = spdk_idxd_submit_fill(chan->chan, &diov, 1, task->fill_pattern, flags, idxd_done,
rc = spdk_idxd_submit_fill(chan->chan, &diov, 1, task->fill_pattern, flags, dsa_done,
task);
break;
case ACCEL_OPC_CRC32C:
@ -200,7 +200,7 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
iovcnt = task->v.iovcnt;
}
rc = spdk_idxd_submit_crc32c(chan->chan, iov, iovcnt, task->seed, task->crc_dst,
flags, idxd_done, task);
flags, dsa_done, task);
break;
case ACCEL_OPC_COPY_CRC32C:
if (task->v.iovcnt == 0) {
@ -220,7 +220,7 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
}
rc = spdk_idxd_submit_copy_crc32c(chan->chan, &diov, 1, iov, iovcnt,
task->seed, task->crc_dst, flags,
idxd_done, task);
dsa_done, task);
break;
default:
assert(false);
@ -230,14 +230,14 @@ _process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
if (rc == 0) {
chan->num_outstanding++;
spdk_trace_record(TRACE_IDXD_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
spdk_trace_record(TRACE_ACCEL_DSA_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
}
return rc;
}
static int
idxd_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *first_task)
dsa_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *first_task)
{
struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
struct spdk_accel_task *task, *tmp;
@ -304,7 +304,7 @@ idxd_poll(void *arg)
TAILQ_INIT(&chan->queued_tasks);
idxd_submit_tasks(task->accel_ch->engine_ch[task->op_code], task);
dsa_submit_tasks(task->accel_ch->engine_ch[task->op_code], task);
}
}
@ -312,13 +312,13 @@ idxd_poll(void *arg)
}
static size_t
accel_engine_idxd_get_ctx_size(void)
accel_engine_dsa_get_ctx_size(void)
{
return 0;
}
static bool
idxd_supports_opcode(enum accel_opcode opc)
dsa_supports_opcode(enum accel_opcode opc)
{
switch (opc) {
case ACCEL_OPC_COPY:
@ -333,26 +333,26 @@ idxd_supports_opcode(enum accel_opcode opc)
}
}
static struct spdk_accel_engine idxd_accel_engine = {
.name = "idxd",
.supports_opcode = idxd_supports_opcode,
.get_io_channel = idxd_get_io_channel,
.submit_tasks = idxd_submit_tasks,
static struct spdk_accel_engine dsa_accel_engine = {
.name = "dsa",
.supports_opcode = dsa_supports_opcode,
.get_io_channel = dsa_get_io_channel,
.submit_tasks = dsa_submit_tasks,
};
static int
idxd_create_cb(void *io_device, void *ctx_buf)
dsa_create_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
struct idxd_device *dev;
struct idxd_device *dsa;
dev = idxd_select_device(chan);
if (dev == NULL) {
dsa = idxd_select_device(chan);
if (dsa == NULL) {
SPDK_ERRLOG("Failed to get an idxd channel\n");
return -EINVAL;
}
chan->dev = dev;
chan->dev = dsa;
chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
TAILQ_INIT(&chan->queued_tasks);
chan->num_outstanding = 0;
@ -362,7 +362,7 @@ idxd_create_cb(void *io_device, void *ctx_buf)
}
static void
idxd_destroy_cb(void *io_device, void *ctx_buf)
dsa_destroy_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
@ -371,9 +371,9 @@ idxd_destroy_cb(void *io_device, void *ctx_buf)
}
static struct spdk_io_channel *
idxd_get_io_channel(void)
dsa_get_io_channel(void)
{
return spdk_get_io_channel(&idxd_accel_engine);
return spdk_get_io_channel(&dsa_accel_engine);
}
static void
@ -387,27 +387,27 @@ attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
return;
}
dev->idxd = idxd;
dev->dsa = idxd;
if (g_next_dev == NULL) {
g_next_dev = dev;
}
TAILQ_INSERT_TAIL(&g_idxd_devices, dev, tailq);
TAILQ_INSERT_TAIL(&g_dsa_devices, dev, tailq);
g_num_devices++;
}
void
accel_engine_idxd_enable_probe(bool kernel_mode)
accel_engine_dsa_enable_probe(bool kernel_mode)
{
g_kernel_mode = kernel_mode;
g_idxd_enable = true;
g_dsa_enable = true;
spdk_idxd_set_config(g_kernel_mode);
}
static int
accel_engine_idxd_init(void)
accel_engine_dsa_init(void)
{
if (!g_idxd_enable) {
if (!g_dsa_enable) {
return -EINVAL;
}
@ -416,32 +416,32 @@ accel_engine_idxd_init(void)
return -EINVAL;
}
if (TAILQ_EMPTY(&g_idxd_devices)) {
SPDK_NOTICELOG("no available idxd devices\n");
if (TAILQ_EMPTY(&g_dsa_devices)) {
SPDK_NOTICELOG("no available dsa devices\n");
return -EINVAL;
}
g_idxd_initialized = true;
SPDK_NOTICELOG("Accel framework IDXD engine initialized.\n");
spdk_accel_engine_register(&idxd_accel_engine);
spdk_io_device_register(&idxd_accel_engine, idxd_create_cb, idxd_destroy_cb,
sizeof(struct idxd_io_channel), "idxd_accel_engine");
g_dsa_initialized = true;
SPDK_NOTICELOG("Accel framework DSA engine initialized.\n");
spdk_accel_engine_register(&dsa_accel_engine);
spdk_io_device_register(&dsa_accel_engine, dsa_create_cb, dsa_destroy_cb,
sizeof(struct idxd_io_channel), "dsa_accel_engine");
return 0;
}
static void
accel_engine_idxd_exit(void *ctx)
accel_engine_dsa_exit(void *ctx)
{
struct idxd_device *dev;
if (g_idxd_initialized) {
spdk_io_device_unregister(&idxd_accel_engine, NULL);
if (g_dsa_initialized) {
spdk_io_device_unregister(&dsa_accel_engine, NULL);
}
while (!TAILQ_EMPTY(&g_idxd_devices)) {
dev = TAILQ_FIRST(&g_idxd_devices);
TAILQ_REMOVE(&g_idxd_devices, dev, tailq);
spdk_idxd_detach(dev->idxd);
while (!TAILQ_EMPTY(&g_dsa_devices)) {
dev = TAILQ_FIRST(&g_dsa_devices);
TAILQ_REMOVE(&g_dsa_devices, dev, tailq);
spdk_idxd_detach(dev->dsa);
free(dev);
}
@ -449,11 +449,11 @@ accel_engine_idxd_exit(void *ctx)
}
static void
accel_engine_idxd_write_config_json(struct spdk_json_write_ctx *w)
accel_engine_dsa_write_config_json(struct spdk_json_write_ctx *w)
{
if (g_idxd_enable) {
if (g_dsa_enable) {
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "method", "idxd_scan_accel_engine");
spdk_json_write_named_string(w, "method", "dsa_scan_accel_engine");
spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_bool(w, "config_kernel_mode", g_kernel_mode);
spdk_json_write_object_end(w);
@ -461,16 +461,18 @@ accel_engine_idxd_write_config_json(struct spdk_json_write_ctx *w)
}
}
SPDK_TRACE_REGISTER_FN(idxd_trace, "idxd", TRACE_GROUP_IDXD)
SPDK_TRACE_REGISTER_FN(dsa_trace, "dsa", TRACE_GROUP_ACCEL_DSA)
{
spdk_trace_register_description("IDXD_OP_SUBMIT", TRACE_IDXD_OP_SUBMIT, OWNER_NONE, OBJECT_NONE, 0,
spdk_trace_register_description("DSA_OP_SUBMIT", TRACE_ACCEL_DSA_OP_SUBMIT, OWNER_NONE, OBJECT_NONE,
0,
SPDK_TRACE_ARG_TYPE_INT, "count");
spdk_trace_register_description("IDXD_OP_COMPLETE", TRACE_IDXD_OP_COMPLETE, OWNER_NONE, OBJECT_NONE,
spdk_trace_register_description("DSA_OP_COMPLETE", TRACE_ACCEL_DSA_OP_COMPLETE, OWNER_NONE,
OBJECT_NONE,
0, SPDK_TRACE_ARG_TYPE_INT, "count");
}
SPDK_ACCEL_MODULE_REGISTER(accel_engine_idxd_init, accel_engine_idxd_exit,
accel_engine_idxd_write_config_json,
accel_engine_idxd_get_ctx_size)
SPDK_ACCEL_MODULE_REGISTER(accel_engine_dsa_init, accel_engine_dsa_exit,
accel_engine_dsa_write_config_json,
accel_engine_dsa_get_ctx_size)
SPDK_LOG_REGISTER_COMPONENT(accel_idxd)
SPDK_LOG_REGISTER_COMPONENT(accel_dsa)

View File

@ -31,13 +31,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_ACCEL_ENGINE_IDXD_H
#define SPDK_ACCEL_ENGINE_IDXD_H
#ifndef SPDK_ACCEL_ENGINE_DSA_H
#define SPDK_ACCEL_ENGINE_DSA_H
#include "spdk/stdinc.h"
#define IDXD_MAX_DEVICES 16
void accel_engine_dsa_enable_probe(bool kernel_mode);
void accel_engine_idxd_enable_probe(bool kernel_mode);
#endif /* SPDK_ACCEL_ENGINE_IDXD_H */
#endif /* SPDK_ACCEL_ENGINE_DSA_H */

View File

@ -31,7 +31,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_idxd.h"
#include "accel_engine_dsa.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
@ -39,23 +39,23 @@
#include "spdk/stdinc.h"
#include "spdk/env.h"
struct rpc_idxd_scan_accel_engine {
struct rpc_dsa_scan_accel_engine {
bool config_kernel_mode;
};
static const struct spdk_json_object_decoder rpc_idxd_scan_accel_engine_decoder[] = {
{"config_kernel_mode", offsetof(struct rpc_idxd_scan_accel_engine, config_kernel_mode), spdk_json_decode_bool, true},
static const struct spdk_json_object_decoder rpc_dsa_scan_accel_engine_decoder[] = {
{"config_kernel_mode", offsetof(struct rpc_dsa_scan_accel_engine, config_kernel_mode), spdk_json_decode_bool, true},
};
static void
rpc_idxd_scan_accel_engine(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_dsa_scan_accel_engine(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_idxd_scan_accel_engine req = {};
struct rpc_dsa_scan_accel_engine req = {};
if (params != NULL) {
if (spdk_json_decode_object(params, rpc_idxd_scan_accel_engine_decoder,
SPDK_COUNTOF(rpc_idxd_scan_accel_engine_decoder),
if (spdk_json_decode_object(params, rpc_dsa_scan_accel_engine_decoder,
SPDK_COUNTOF(rpc_dsa_scan_accel_engine_decoder),
&req)) {
SPDK_ERRLOG("spdk_json_decode_object() failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
@ -65,12 +65,12 @@ rpc_idxd_scan_accel_engine(struct spdk_jsonrpc_request *request,
}
if (req.config_kernel_mode) {
SPDK_NOTICELOG("Enabling IDXD kernel-mode\n");
SPDK_NOTICELOG("Enabling DSA kernel-mode\n");
} else {
SPDK_NOTICELOG("Enabling IDXD user-mode\n");
SPDK_NOTICELOG("Enabling DSA user-mode\n");
}
accel_engine_idxd_enable_probe(req.config_kernel_mode);
accel_engine_dsa_enable_probe(req.config_kernel_mode);
spdk_jsonrpc_send_bool_response(request, true);
}
SPDK_RPC_REGISTER("idxd_scan_accel_engine", rpc_idxd_scan_accel_engine, SPDK_RPC_STARTUP)
SPDK_RPC_REGISTER("dsa_scan_accel_engine", rpc_dsa_scan_accel_engine, SPDK_RPC_STARTUP)

View File

@ -8,7 +8,7 @@ from . import app
from . import bdev
from . import blobfs
from . import env_dpdk
from . import idxd
from . import dsa
from . import ioat
from . import iscsi
from . import log

11
python/spdk/rpc/dsa.py Normal file
View File

@ -0,0 +1,11 @@
def dsa_scan_accel_engine(client, config_kernel_mode=None):
"""Scan and enable DSA accel engine.
Args:
config_kernel_mode: Use kernel DSA driver. (optional)
"""
params = {}
if config_kernel_mode is not None:
params['config_kernel_mode'] = config_kernel_mode
return client.call('dsa_scan_accel_engine', params)

View File

@ -1,11 +0,0 @@
def idxd_scan_accel_engine(client, config_kernel_mode=None):
"""Scan and enable IDXD accel engine.
Args:
config_kernel_mode: Use kernel IDXD driver. (optional)
"""
params = {}
if config_kernel_mode is not None:
params['config_kernel_mode'] = config_kernel_mode
return client.call('idxd_scan_accel_engine', params)

View File

@ -153,8 +153,8 @@ Optional, SPDK Target only:
for ADQ testing. You need and ADQ-capable NIC like the Intel E810.
- bpf_scripts - list of bpftrace scripts that will be attached during the
test run. Available scripts can be found in the spdk/scripts/bpf directory.
- idxd_settings - bool. Only for TCP transport. Enable offloading CRC32C
calculation to IDXD. You need a CPU with the Intel(R) Data Streaming
- dsa_settings - bool. Only for TCP transport. Enable offloading CRC32C
calculation to DSA. You need a CPU with the Intel(R) Data Streaming
Accelerator (DSA) engine.
### Initiator system settings section

View File

@ -1086,7 +1086,7 @@ class SPDKTarget(Target):
self.max_queue_depth = 128
self.bpf_proc = None
self.bpf_scripts = []
self.enable_idxd = False
self.enable_dsa = False
if "num_shared_buffers" in target_config:
self.num_shared_buffers = target_config["num_shared_buffers"]
@ -1098,11 +1098,11 @@ class SPDKTarget(Target):
self.dif_insert_strip = target_config["dif_insert_strip"]
if "bpf_scripts" in target_config:
self.bpf_scripts = target_config["bpf_scripts"]
if "idxd_settings" in target_config:
self.enable_idxd = target_config["idxd_settings"]
if "dsa_settings" in target_config:
self.enable_dsa = target_config["dsa_settings"]
self.log_print("====IDXD settings:====")
self.log_print("IDXD enabled: %s" % (self.enable_idxd))
self.log_print("====DSA settings:====")
self.log_print("DSA enabled: %s" % (self.enable_dsa))
@staticmethod
def get_num_cores(core_mask):
@ -1249,9 +1249,9 @@ class SPDKTarget(Target):
rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
nvme_adminq_poll_period_us=100000, retry_count=4)
if self.enable_idxd:
rpc.idxd.idxd_scan_accel_engine(self.client, config_kernel_mode=None)
self.log_print("Target IDXD accel engine enabled")
if self.enable_dsa:
rpc.dsa.dsa_scan_accel_engine(self.client, config_kernel_mode=None)
self.log_print("Target DSA accel engine enabled")
rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
rpc.framework_start_init(self.client)

View File

@ -2600,15 +2600,15 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
help='Enable IOAT accel engine offload.')
p.set_defaults(func=ioat_scan_accel_engine)
# idxd
def idxd_scan_accel_engine(args):
rpc.idxd.idxd_scan_accel_engine(args.client, config_kernel_mode=args.config_kernel_mode)
# dsa
def dsa_scan_accel_engine(args):
rpc.dsa.dsa_scan_accel_engine(args.client, config_kernel_mode=args.config_kernel_mode)
p = subparsers.add_parser('idxd_scan_accel_engine',
help='Set config and enable idxd accel engine offload.')
p.add_argument('-k', '--config-kernel-mode', help='Use Kernel mode idxd',
p = subparsers.add_parser('dsa_scan_accel_engine',
help='Set config and enable dsa accel engine offload.')
p.add_argument('-k', '--config-kernel-mode', help='Use Kernel mode dsa',
action='store_true', dest='config_kernel_mode')
p.set_defaults(func=idxd_scan_accel_engine, config_kernel_mode=None)
p.set_defaults(func=dsa_scan_accel_engine, config_kernel_mode=None)
# opal
def bdev_nvme_opal_init(args):

View File

@ -23,7 +23,7 @@ def sort_json_object(o):
def filter_methods(do_remove_global_rpcs):
global_rpcs = [
'idxd_scan_accel_engine',
'dsa_scan_accel_engine',
'iscsi_set_options',
'nvmf_set_config',
'nvmf_set_max_subsystems',