modules/accel/iaa: add IAA accel_fw module

And associated RPC to enable it.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: I06785bcd8b8957293ad41d13bab556fe62f29fd5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12765
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
paul luse 2022-05-20 12:08:38 -07:00 committed by Jim Harris
parent 0ff560ea3b
commit b483811ff1
17 changed files with 736 additions and 8 deletions


@@ -42,6 +42,8 @@ the destination is persistent.
The RPC `idxd_scan_accel_engine` has been renamed to `dsa_scan_accel_engine`
The RPC `iaa_scan_accel_engine` has been added.
Many HW related structs/functions with the name `idxd` have been renamed `dsa`
to more accurately represent the HW they are associated with.


@@ -1529,6 +1529,37 @@ Example response:
}
~~~
### iaa_scan_accel_engine {#rpc_iaa_scan_accel_engine}
Enable IAA accel engine offload.
This feature is considered experimental.
#### Parameters
None
#### Example
Example request:
~~~json
{
"jsonrpc": "2.0",
"method": "iaa_scan_accel_engine",
"id": 1
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": true
}
~~~
### ioat_scan_accel_engine {#rpc_ioat_scan_accel_engine}
Enable ioat accel engine offload.


@@ -54,7 +54,9 @@ enum accel_opcode {
ACCEL_OPC_COMPARE = 3,
ACCEL_OPC_CRC32C = 4,
ACCEL_OPC_COPY_CRC32C = 5,
ACCEL_OPC_COMPRESS = 6,
ACCEL_OPC_DECOMPRESS = 7,
ACCEL_OPC_LAST = 8,
};
/**
@@ -246,6 +248,48 @@ int spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst, struct
uint32_t iovcnt, uint32_t *crc_dst, uint32_t seed,
int flags, spdk_accel_completion_cb cb_fn, void *cb_arg);
/**
* Build and submit a memory compress request.
*
* This function will build the compress descriptor and submit it.
*
* \param ch I/O channel associated with this call
* \param dst Destination to compress to.
* \param src Source to read from.
* \param nbytes_dst Length in bytes of output buffer.
* \param nbytes_src Length in bytes of input buffer.
* \param output_size The size of the compressed data
* \param flags Flags, optional flags that can vary per operation.
* \param cb_fn Callback function which will be called when the request is complete.
* \param cb_arg Opaque value which will be passed back as the arg parameter in
* the completion callback.
*
* \return 0 on success, negative errno on failure.
*/
int spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, void *src,
uint64_t nbytes_dst, uint64_t nbytes_src, uint32_t *output_size,
int flags, spdk_accel_completion_cb cb_fn, void *cb_arg);
/**
* Build and submit a memory decompress request.
*
* This function will build the decompress descriptor and submit it.
*
* \param ch I/O channel associated with this call
* \param dst Destination. Must be large enough to hold decompressed data.
* \param src Source to read from.
* \param nbytes_dst Length in bytes of output buffer.
* \param nbytes_src Length in bytes of input buffer.
* \param flags Flags, optional flags that can vary per operation.
* \param cb_fn Callback function which will be called when the request is complete.
* \param cb_arg Opaque value which will be passed back as the arg parameter in
* the completion callback.
*
* \return 0 on success, negative errno on failure.
*/
int spdk_accel_submit_decompress(struct spdk_io_channel *ch, void *dst, void *src,
uint64_t nbytes_dst, uint64_t nbytes_src, int flags,
spdk_accel_completion_cb cb_fn, void *cb_arg);
struct spdk_json_write_ctx;
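The two prototypes above are the only public API additions in this change. As a rough usage sketch (not part of the patch), a caller could submit a compress operation as shown below; the helper names, the use of `spdk_accel_engine_get_io_channel()`, and the error handling are illustrative assumptions, and the IAA engine is expected to have been enabled beforehand via the new `iaa_scan_accel_engine` RPC.

~~~c
/* Illustrative sketch only -- not part of this commit. */
#include "spdk/accel_engine.h"
#include "spdk/log.h"

static void
example_compress_done(void *cb_arg, int status)
{
	uint32_t *output_size = cb_arg;

	if (status != 0) {
		SPDK_ERRLOG("compress failed: %d\n", status);
		return;
	}
	/* The engine fills *output_size with the compressed length on completion. */
	SPDK_NOTICELOG("compressed payload is %u bytes\n", *output_size);
}

static int
example_submit_compress(void *dst, uint64_t dst_len, void *src, uint64_t src_len,
			uint32_t *output_size)
{
	/* Assumed helper: any accel framework I/O channel owned by the calling
	 * thread works; release it with spdk_put_io_channel() when done. */
	struct spdk_io_channel *ch = spdk_accel_engine_get_io_channel();

	if (ch == NULL) {
		return -ENOMEM;
	}
	/* flags = 0; completion is reported through example_compress_done(). */
	return spdk_accel_submit_compress(ch, dst, src, dst_len, src_len,
					  output_size, 0, example_compress_done,
					  output_size);
}
~~~

The decompress path is symmetric, minus the `output_size` pointer.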


@@ -74,9 +74,13 @@ struct spdk_accel_task {
uint32_t seed;
uint64_t fill_pattern;
};
union {
uint32_t *crc_dst;
uint32_t *output_size;
};
enum accel_opcode op_code;
uint64_t nbytes;
uint64_t nbytes_dst;
int flags;
int status;
TAILQ_ENTRY(spdk_accel_task) link;


@@ -64,6 +64,7 @@
#define TRACE_GROUP_ACCEL_DSA 0x9
#define TRACE_GROUP_THREAD 0xA
#define TRACE_GROUP_NVME_PCIE 0xB
#define TRACE_GROUP_ACCEL_IAA 0xC
/* Bdev tracepoint definitions */
#define TRACE_BDEV_IO_START SPDK_TPOINT_ID(TRACE_GROUP_BDEV, 0x0)
@@ -168,5 +169,7 @@
/* idxd trace definitions */
#define TRACE_ACCEL_DSA_OP_SUBMIT SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_DSA, 0x0)
#define TRACE_ACCEL_DSA_OP_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_DSA, 0x1)
#define TRACE_ACCEL_IAA_OP_SUBMIT SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_IAA, 0x0)
#define TRACE_ACCEL_IAA_OP_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_ACCEL_IAA, 0x1)
#endif /* SPDK_INTERNAL_TRACE_DEFS */


@@ -414,6 +414,60 @@ spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
return engine->submit_tasks(engine_ch, accel_task);
}
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes_dst,
uint64_t nbytes_src, uint32_t *output_size, int flags,
spdk_accel_completion_cb cb_fn, void *cb_arg)
{
struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
struct spdk_accel_task *accel_task;
struct spdk_accel_engine *engine = g_engines_opc[ACCEL_OPC_COMPRESS];
struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_COMPRESS];
accel_task = _get_task(accel_ch, cb_fn, cb_arg);
if (accel_task == NULL) {
return -ENOMEM;
}
accel_task->output_size = output_size;
accel_task->src = src;
accel_task->dst = dst;
accel_task->nbytes = nbytes_src;
accel_task->nbytes_dst = nbytes_dst;
accel_task->flags = flags;
accel_task->op_code = ACCEL_OPC_COMPRESS;
return engine->submit_tasks(engine_ch, accel_task);
}
int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes_dst,
uint64_t nbytes_src, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
struct spdk_accel_task *accel_task;
struct spdk_accel_engine *engine = g_engines_opc[ACCEL_OPC_DECOMPRESS];
struct spdk_io_channel *engine_ch = accel_ch->engine_ch[ACCEL_OPC_DECOMPRESS];
accel_task = _get_task(accel_ch, cb_fn, cb_arg);
if (accel_task == NULL) {
return -ENOMEM;
}
accel_task->src = src;
accel_task->dst = dst;
accel_task->nbytes = nbytes_src;
accel_task->nbytes_dst = nbytes_dst;
accel_task->flags = flags;
accel_task->op_code = ACCEL_OPC_DECOMPRESS;
return engine->submit_tasks(engine_ch, accel_task);
}
/* Helper function used when accel modules register with the framework. */
void spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
@@ -447,12 +501,15 @@ accel_engine_create_cb(void *io_device, void *ctx_buf)
/* Assign engines and get IO channels for each */
for (i = 0; i < ACCEL_OPC_LAST; i++) {
/* TODO this check goes away once SW implementation of comp/decomp is implemented */
if (g_engines_opc[i]) {
accel_ch->engine_ch[i] = g_engines_opc[i]->get_io_channel();
/* This can happen if idxd runs out of channels. */
if (accel_ch->engine_ch[i] == NULL) {
goto err;
}
}
}
return 0;
err:
@@ -470,6 +527,8 @@ accel_engine_destroy_cb(void *io_device, void *ctx_buf)
int i;
for (i = 0; i < ACCEL_OPC_LAST; i++) {
/* TODO this check goes away once SW implementation of comp/decomp is implemented,
 * or it can be assert */
if (accel_ch->engine_ch[i]) {
spdk_put_io_channel(accel_ch->engine_ch[i]);
accel_ch->engine_ch[i] = NULL;
@@ -518,7 +577,9 @@ spdk_accel_engine_initialize(void)
}
}
#ifdef DEBUG
/* TODO change ACCEL_OPC_COMPRESS to ACCEL_OPC_LAST once the SW implementation of
 * compress/decomp is done */
for (op = 0; op < ACCEL_OPC_COMPRESS; op++) {
assert(g_engines_opc[op] != NULL);
}
#endif


@@ -14,6 +14,8 @@
spdk_accel_submit_crc32cv;
spdk_accel_submit_copy_crc32c;
spdk_accel_submit_copy_crc32cv;
spdk_accel_submit_compress;
spdk_accel_submit_decompress;
spdk_accel_write_config_json;
# functions needed by modules


@@ -120,6 +120,7 @@ endif
# module/accel
DEPDIRS-accel_ioat := log ioat thread jsonrpc rpc accel
DEPDIRS-accel_dsa := log idxd thread $(JSON_LIBS) accel trace
DEPDIRS-accel_iaa := log idxd thread $(JSON_LIBS) accel trace
# module/env_dpdk
DEPDIRS-env_dpdk_rpc := log $(JSON_LIBS)


@@ -118,7 +118,7 @@ endif
ACCEL_MODULES_LIST = accel_ioat ioat
ifeq ($(CONFIG_IDXD),y)
ACCEL_MODULES_LIST += accel_dsa accel_iaa idxd
endif
SCHEDULER_MODULES_LIST = scheduler_dynamic


@@ -37,6 +37,7 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = ioat
DIRS-$(CONFIG_IDXD) += dsa
DIRS-$(CONFIG_IDXD) += iaa
.PHONY: all clean $(DIRS-y)

module/accel/iaa/Makefile (new file)

@@ -0,0 +1,45 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 1
SO_MINOR := 0
LIBNAME = accel_iaa
C_SRCS = accel_engine_iaa.c accel_engine_iaa_rpc.c
SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk

module/accel/iaa/accel_engine_iaa.c (new file)

@@ -0,0 +1,424 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_iaa.h"
#include "spdk/stdinc.h"
#include "spdk_internal/accel_engine.h"
#include "spdk/log.h"
#include "spdk_internal/idxd.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/thread.h"
#include "spdk/idxd.h"
#include "spdk/util.h"
#include "spdk/json.h"
#include "spdk/trace.h"
#include "spdk_internal/trace_defs.h"
static bool g_iaa_enable = false;
enum channel_state {
IDXD_CHANNEL_ACTIVE,
IDXD_CHANNEL_ERROR,
};
static bool g_iaa_initialized = false;
struct idxd_device {
struct spdk_idxd_device *iaa;
TAILQ_ENTRY(idxd_device) tailq;
};
static TAILQ_HEAD(, idxd_device) g_iaa_devices = TAILQ_HEAD_INITIALIZER(g_iaa_devices);
static struct idxd_device *g_next_dev = NULL;
static uint32_t g_num_devices = 0;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;
struct idxd_io_channel {
struct spdk_idxd_io_channel *chan;
struct idxd_device *dev;
enum channel_state state;
struct spdk_poller *poller;
uint32_t num_outstanding;
TAILQ_HEAD(, spdk_accel_task) queued_tasks;
};
static struct spdk_io_channel *iaa_get_io_channel(void);
static struct idxd_device *
idxd_select_device(struct idxd_io_channel *chan)
{
uint32_t count = 0;
struct idxd_device *dev;
uint32_t socket_id = spdk_env_get_socket_id(spdk_env_get_current_core());
/*
* We allow channels to share underlying devices,
* selection is round-robin based with a limitation
* on how many channels can share one device.
*/
do {
/* select next device */
pthread_mutex_lock(&g_dev_lock);
g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
if (g_next_dev == NULL) {
g_next_dev = TAILQ_FIRST(&g_iaa_devices);
}
dev = g_next_dev;
pthread_mutex_unlock(&g_dev_lock);
if (socket_id != spdk_idxd_get_socket(dev->iaa)) {
continue;
}
/*
* Now see if a channel is available on this one. We only
* allow a specific number of channels to share a device
* to limit outstanding IO for flow control purposes.
*/
chan->chan = spdk_idxd_get_channel(dev->iaa);
if (chan->chan != NULL) {
SPDK_DEBUGLOG(accel_iaa, "On socket %d using device on socket %d\n",
socket_id, spdk_idxd_get_socket(dev->iaa));
return dev;
}
} while (count++ < g_num_devices);
/* We are out of available channels and/or devices for the local socket. We fix the number
* of channels that we allocate per device and only allocate devices on the same socket
* that the current thread is on. On a 2-socket system it may be possible to avoid
* this situation by spreading threads across the sockets.
*/
SPDK_ERRLOG("No more IAA devices available on the local socket.\n");
return NULL;
}
static void
iaa_done(void *cb_arg, int status)
{
struct spdk_accel_task *accel_task = cb_arg;
struct idxd_io_channel *chan;
chan = spdk_io_channel_get_ctx(accel_task->accel_ch->engine_ch[accel_task->op_code]);
assert(chan->num_outstanding > 0);
spdk_trace_record(TRACE_ACCEL_IAA_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
chan->num_outstanding--;
spdk_accel_task_complete(accel_task, status);
}
static int
_process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
int rc = 0;
struct iovec siov = {};
struct iovec diov = {};
int flags = 0;
switch (task->op_code) {
case ACCEL_OPC_COMPRESS:
siov.iov_base = task->src;
siov.iov_len = task->nbytes;
diov.iov_base = task->dst;
diov.iov_len = task->nbytes_dst;
rc = spdk_idxd_submit_compress(chan->chan, &diov, 1, &siov, 1, task->output_size,
flags, iaa_done, task);
break;
case ACCEL_OPC_DECOMPRESS:
siov.iov_base = task->src;
siov.iov_len = task->nbytes;
diov.iov_base = task->dst;
diov.iov_len = task->nbytes_dst;
rc = spdk_idxd_submit_decompress(chan->chan, &diov, 1, &siov, 1, flags, iaa_done, task);
break;
default:
assert(false);
rc = -EINVAL;
break;
}
if (rc == 0) {
chan->num_outstanding++;
spdk_trace_record(TRACE_ACCEL_IAA_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
}
return rc;
}
static int
iaa_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *first_task)
{
struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
struct spdk_accel_task *task, *tmp;
int rc = 0;
task = first_task;
if (chan->state == IDXD_CHANNEL_ERROR) {
while (task) {
tmp = TAILQ_NEXT(task, link);
spdk_accel_task_complete(task, -EINVAL);
task = tmp;
}
return 0;
}
if (!TAILQ_EMPTY(&chan->queued_tasks)) {
goto queue_tasks;
}
/* The caller will either submit a single task or a group of tasks that are
* linked together but they cannot be on a list. For example, see idxd_poll()
* where a list of queued tasks is being resubmitted, the list they are on
* is initialized after saving off the first task from the list which is then
* passed in here. A similar thing is done in the accel framework.
*/
while (task) {
tmp = TAILQ_NEXT(task, link);
rc = _process_single_task(ch, task);
if (rc == -EBUSY) {
goto queue_tasks;
} else if (rc) {
spdk_accel_task_complete(task, rc);
}
task = tmp;
}
return 0;
queue_tasks:
while (task != NULL) {
tmp = TAILQ_NEXT(task, link);
TAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
task = tmp;
}
return 0;
}
static int
idxd_poll(void *arg)
{
struct idxd_io_channel *chan = arg;
struct spdk_accel_task *task = NULL;
int count;
count = spdk_idxd_process_events(chan->chan);
/* Check if there are any pending ops to process if the channel is active */
if (chan->state == IDXD_CHANNEL_ACTIVE) {
/* Submit queued tasks */
if (!TAILQ_EMPTY(&chan->queued_tasks)) {
task = TAILQ_FIRST(&chan->queued_tasks);
TAILQ_INIT(&chan->queued_tasks);
iaa_submit_tasks(task->accel_ch->engine_ch[task->op_code], task);
}
}
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static size_t
accel_engine_iaa_get_ctx_size(void)
{
return 0;
}
static bool
iaa_supports_opcode(enum accel_opcode opc)
{
switch (opc) {
case ACCEL_OPC_COMPRESS:
case ACCEL_OPC_DECOMPRESS:
return true;
default:
return false;
}
}
static struct spdk_accel_engine iaa_accel_engine = {
.name = "iaa",
.supports_opcode = iaa_supports_opcode,
.get_io_channel = iaa_get_io_channel,
.submit_tasks = iaa_submit_tasks,
};
static int
idxd_create_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
struct idxd_device *iaa;
iaa = idxd_select_device(chan);
if (iaa == NULL) {
SPDK_ERRLOG("Failed to get an idxd channel\n");
return -EINVAL;
}
chan->dev = iaa;
chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
TAILQ_INIT(&chan->queued_tasks);
chan->num_outstanding = 0;
chan->state = IDXD_CHANNEL_ACTIVE;
return 0;
}
static void
idxd_destroy_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
spdk_poller_unregister(&chan->poller);
spdk_idxd_put_channel(chan->chan);
}
static struct spdk_io_channel *
iaa_get_io_channel(void)
{
return spdk_get_io_channel(&iaa_accel_engine);
}
static void
attach_cb(void *cb_ctx, struct spdk_idxd_device *iaa)
{
struct idxd_device *dev;
dev = calloc(1, sizeof(*dev));
if (dev == NULL) {
SPDK_ERRLOG("Failed to allocate device struct\n");
return;
}
dev->iaa = iaa;
if (g_next_dev == NULL) {
g_next_dev = dev;
}
TAILQ_INSERT_TAIL(&g_iaa_devices, dev, tailq);
g_num_devices++;
}
void
accel_engine_iaa_enable_probe(void)
{
g_iaa_enable = true;
/* TODO initially only support user mode w/IAA */
spdk_idxd_set_config(false);
}
static bool
caller_probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
{
if (dev->id.device_id == PCI_DEVICE_ID_INTEL_IAA) {
return true;
}
return false;
}
static int
accel_engine_iaa_init(void)
{
if (!g_iaa_enable) {
return -EINVAL;
}
if (spdk_idxd_probe(NULL, attach_cb, caller_probe_cb) != 0) {
SPDK_ERRLOG("spdk_idxd_probe() failed\n");
return -EINVAL;
}
if (TAILQ_EMPTY(&g_iaa_devices)) {
SPDK_NOTICELOG("no available idxd devices\n");
return -EINVAL;
}
g_iaa_initialized = true;
SPDK_NOTICELOG("Accel framework IAA engine initialized.\n");
spdk_accel_engine_register(&iaa_accel_engine);
spdk_io_device_register(&iaa_accel_engine, idxd_create_cb, idxd_destroy_cb,
sizeof(struct idxd_io_channel), "iaa_accel_engine");
return 0;
}
static void
accel_engine_iaa_exit(void *ctx)
{
struct idxd_device *dev;
if (g_iaa_initialized) {
spdk_io_device_unregister(&iaa_accel_engine, NULL);
}
while (!TAILQ_EMPTY(&g_iaa_devices)) {
dev = TAILQ_FIRST(&g_iaa_devices);
TAILQ_REMOVE(&g_iaa_devices, dev, tailq);
spdk_idxd_detach(dev->iaa);
free(dev);
}
spdk_accel_engine_module_finish();
}
static void
accel_engine_iaa_write_config_json(struct spdk_json_write_ctx *w)
{
if (g_iaa_enable) {
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "method", "iaa_scan_accel_engine");
spdk_json_write_object_end(w);
}
}
SPDK_TRACE_REGISTER_FN(iaa_trace, "iaa", TRACE_GROUP_ACCEL_IAA)
{
spdk_trace_register_description("IAA_OP_SUBMIT", TRACE_ACCEL_IAA_OP_SUBMIT, OWNER_NONE, OBJECT_NONE,
0, SPDK_TRACE_ARG_TYPE_INT, "count");
spdk_trace_register_description("IAA_OP_COMPLETE", TRACE_ACCEL_IAA_OP_COMPLETE, OWNER_NONE,
OBJECT_NONE, 0, SPDK_TRACE_ARG_TYPE_INT, "count");
}
SPDK_ACCEL_MODULE_REGISTER(accel_engine_iaa_init, accel_engine_iaa_exit,
accel_engine_iaa_write_config_json,
accel_engine_iaa_get_ctx_size)
SPDK_LOG_REGISTER_COMPONENT(accel_iaa)

module/accel/iaa/accel_engine_iaa.h (new file)

@@ -0,0 +1,41 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_ACCEL_ENGINE_IAA_H
#define SPDK_ACCEL_ENGINE_IAA_H
#include "spdk/stdinc.h"
void accel_engine_iaa_enable_probe(void);
#endif /* SPDK_ACCEL_ENGINE_IAA_H */

module/accel/iaa/accel_engine_iaa_rpc.c (new file)

@@ -0,0 +1,56 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_iaa.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/event.h"
#include "spdk/stdinc.h"
#include "spdk/env.h"
static void
rpc_iaa_scan_accel_engine(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
if (params != NULL) {
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
"iaa_scan_accel_engine requires no parameters");
return;
}
SPDK_NOTICELOG("Enabling IAA user-mode\n");
accel_engine_iaa_enable_probe();
spdk_jsonrpc_send_bool_response(request, true);
}
SPDK_RPC_REGISTER("iaa_scan_accel_engine", rpc_iaa_scan_accel_engine, SPDK_RPC_STARTUP)


@@ -9,6 +9,7 @@ from . import bdev
from . import blobfs
from . import env_dpdk
from . import dsa
from . import iaa
from . import ioat
from . import iscsi
from . import log

python/spdk/rpc/iaa.py (new file)

@@ -0,0 +1,4 @@
def iaa_scan_accel_engine(client):
    """Scan and enable IAA accel engine."""
    return client.call('iaa_scan_accel_engine')


@@ -2609,6 +2609,14 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
    action='store_true', dest='config_kernel_mode')
p.set_defaults(func=dsa_scan_accel_engine, config_kernel_mode=None)
# iaa
def iaa_scan_accel_engine(args):
    rpc.iaa.iaa_scan_accel_engine(args.client)
p = subparsers.add_parser('iaa_scan_accel_engine',
    help='Set config and enable iaa accel engine offload.')
p.set_defaults(func=iaa_scan_accel_engine)
# opal
def bdev_nvme_opal_init(args):
    rpc.nvme.bdev_nvme_opal_init(args.client,