ioat: add user-mode Intel I/OAT driver

The ioat driver supports DMA engine copy offload hardware available on
Intel Xeon platforms.

Change-Id: Ida0b17b25816576948ddb1b0443587e0f09574d4
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2015-12-03 14:30:38 -07:00
parent 8925d9dec0
commit d4ab30ba33
32 changed files with 5256 additions and 9 deletions

4
CONFIG
View File

@ -45,3 +45,7 @@ CONFIG_DPDK_DIR?=/path/to/dpdk
# Header file to use for NVMe implementation specific functions. # Header file to use for NVMe implementation specific functions.
# Defaults to depending on DPDK. # Defaults to depending on DPDK.
CONFIG_NVME_IMPL?=nvme_impl.h CONFIG_NVME_IMPL?=nvme_impl.h
# Header file to use for IOAT implementation specific functions.
# Defaults to depending on DPDK.
CONFIG_IOAT_IMPL?=ioat_impl.h

View File

@ -82,8 +82,8 @@ Hugepages and Device Binding
============================ ============================
Before running an SPDK application, some hugepages must be allocated and Before running an SPDK application, some hugepages must be allocated and
any NVMe devices must be unbound from the native NVMe kernel driver. any NVMe and I/OAT devices must be unbound from the native kernel drivers.
SPDK includes scripts to automate this process on both Linux and FreeBSD. SPDK includes scripts to automate this process on both Linux and FreeBSD.
1) scripts/configure_hugepages.sh 1) scripts/configure_hugepages.sh
2) scripts/unbind_nvme.sh 2) scripts/unbind.sh

View File

@ -38,7 +38,7 @@ timing_enter afterboot
./scripts/configure_hugepages.sh 1024 ./scripts/configure_hugepages.sh 1024
timing_exit afterboot timing_exit afterboot
./scripts/unbind_nvme.sh ./scripts/unbind.sh
##################### #####################
# Unit Tests # Unit Tests
@ -48,6 +48,7 @@ timing_enter lib
time test/lib/nvme/nvme.sh time test/lib/nvme/nvme.sh
time test/lib/memory/memory.sh time test/lib/memory/memory.sh
time test/lib/ioat/ioat.sh
timing_exit lib timing_exit lib

2367
doc/Doxyfile.ioat Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
DOXYFILES = Doxyfile.nvme DOXYFILES = Doxyfile.ioat Doxyfile.nvme
OUTPUT_DIRS = $(patsubst Doxyfile.%,output.%,$(DOXYFILES)) OUTPUT_DIRS = $(patsubst Doxyfile.%,output.%,$(DOXYFILES))
all: doc all: doc

48
doc/ioat.index.txt Normal file
View File

@ -0,0 +1,48 @@
/*
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*!
\mainpage SPDK Userspace I/OAT Driver
\section interface Public Interface
- ioat.h
\section key_functions Key Functions
- ioat_attach() \copybrief ioat_attach()
- ioat_register_thread() \copybrief ioat_register_thread()
- ioat_submit_copy() \copybrief ioat_submit_copy()
*/

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(CURDIR)/.. SPDK_ROOT_DIR := $(CURDIR)/..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y += nvme DIRS-y += ioat nvme
.PHONY: all clean $(DIRS-y) .PHONY: all clean $(DIRS-y)

44
examples/ioat/Makefile Normal file
View File

@ -0,0 +1,44 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(CURDIR)/../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y += perf verify
.PHONY: all clean $(DIRS-y)
all: $(DIRS-y)
clean: $(DIRS-y)
include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk

1
examples/ioat/perf/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
perf

View File

@ -0,0 +1,57 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(CURDIR)/../../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
APP = perf
C_SRCS := perf.c
CFLAGS += -I. $(DPDK_INC)
SPDK_LIBS += $(SPDK_ROOT_DIR)/lib/ioat/libspdk_ioat.a \
$(SPDK_ROOT_DIR)/lib/util/libspdk_util.a \
$(SPDK_ROOT_DIR)/lib/memory/libspdk_memory.a
LIBS += $(SPDK_LIBS) -lpciaccess -lpthread $(DPDK_LIB) -lrt
all: $(APP)
$(APP): $(OBJS) $(SPDK_LIBS)
$(LINK_C)
clean:
$(CLEAN_C) $(APP)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

435
examples/ioat/perf/perf.c Normal file
View File

@ -0,0 +1,435 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <rte_config.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_mempool.h>
#include <pciaccess.h>
#include "spdk/ioat.h"
#include "spdk/pci.h"
#include "spdk/string.h"
/*
 * Runtime options supplied on the command line (see parse_args()).
 */
struct user_config {
	int xfer_size_bytes;	/* size of each DMA copy */
	int queue_depth;	/* outstanding transfers per lcore */
	int time_in_sec;	/* test duration */
	bool verify;		/* memcmp() each completed copy when true */
	char *core_mask;	/* EAL core mask string, e.g. "0x1" */
};

/* One attached I/OAT channel; linked into g_devices. */
struct ioat_device {
	struct ioat_channel *ioat;
	TAILQ_ENTRY(ioat_device) tailq;
};

static TAILQ_HEAD(, ioat_device) g_devices;	/* all attached channels */
static struct user_config g_user_config;

/* Per-lcore statistics and resources. */
struct thread_entry {
	uint64_t xfer_completed;	/* copies completed (and verified OK) */
	uint64_t xfer_failed;		/* copies whose verification failed */
	uint64_t current_queue_depth;	/* transfers currently in flight */
	unsigned lcore_id;
	bool is_draining;		/* set when the timed run ends */
	struct rte_mempool *data_pool;	/* source/destination buffers */
	struct rte_mempool *task_pool;	/* struct ioat_task objects */
};

/* Context carried through each DMA request to its completion callback. */
struct ioat_task {
	struct thread_entry *thread_entry;
	void *src;
	void *dst;
};

static void submit_single_xfer(struct thread_entry *thread_entry, struct ioat_task *ioat_task,
			       void *dst, void *src);
static void
construct_user_config(struct user_config *self)
{
self->xfer_size_bytes = 4096;
self->queue_depth = 256;
self->time_in_sec = 10;
self->verify = false;
self->core_mask = "0x1";
}
static void
dump_user_config(struct user_config *self)
{
printf("User configuration:\n");
printf("Transfer size: %u bytes\n", self->xfer_size_bytes);
printf("Queue depth: %u\n", self->queue_depth);
printf("Run time: %u seconds\n", self->time_in_sec);
printf("Core mask: %s\n", self->core_mask);
printf("Verify: %s\n\n", self->verify ? "Yes" : "No");
}
/* Detach every attached channel and release its bookkeeping struct. */
static void
ioat_exit(void)
{
	struct ioat_device *dev;

	dev = TAILQ_FIRST(&g_devices);
	while (dev != NULL) {
		TAILQ_REMOVE(&g_devices, dev, tailq);
		ioat_detach(dev->ioat);
		rte_free(dev);
		dev = TAILQ_FIRST(&g_devices);
	}
}
/*
 * Completion callback for one copy: update per-thread statistics and
 * either recycle the buffers into a new transfer or, when draining,
 * return them to their mempools.
 */
static void
ioat_done(void *cb_arg)
{
	struct ioat_task *task = cb_arg;
	struct thread_entry *thread = task->thread_entry;
	bool failed;

	failed = g_user_config.verify &&
		 memcmp(task->src, task->dst, g_user_config.xfer_size_bytes) != 0;
	if (failed) {
		thread->xfer_failed++;
	} else {
		thread->xfer_completed++;
	}

	thread->current_queue_depth--;

	if (!thread->is_draining) {
		/* Keep the pipeline full: reuse the same buffers. */
		submit_single_xfer(thread, task, task->dst, task->src);
	} else {
		rte_mempool_put(thread->data_pool, task->src);
		rte_mempool_put(thread->data_pool, task->dst);
		rte_mempool_put(thread->task_pool, task);
	}
}
/*
 * Enumerate PCI devices of class 0x0880 (generic system peripheral /
 * DMA controller), attach each supported I/OAT channel, and add it to
 * g_devices.
 *
 * Returns 0 on success, -1 on failure (channels attached before the
 * failure are detached again).
 */
static int
ioat_init(void)
{
	struct pci_device_iterator *iter;
	struct pci_device *pci_dev;
	int err = 0;
	struct pci_id_match match;
	struct ioat_device *dev;

	pci_system_init();
	TAILQ_INIT(&g_devices);

	/* Zero first so fields not set below (e.g. match_data) are defined. */
	memset(&match, 0, sizeof(match));
	match.vendor_id = PCI_MATCH_ANY;
	match.subvendor_id = PCI_MATCH_ANY;
	match.subdevice_id = PCI_MATCH_ANY;
	match.device_id = PCI_MATCH_ANY;
	match.device_class = 0x088000;
	match.device_class_mask = 0xFFFFFF;

	iter = pci_id_match_iterator_create(&match);
	if (iter == NULL) {
		return -1;
	}

	while ((pci_dev = pci_device_next(iter)) != NULL) {
		/* Check if the PCI device is a supported I/OAT channel. */
		if (!(ioat_pci_device_match_id(pci_dev->vendor_id,
					       pci_dev->device_id))) {
			continue;
		}

		printf(" Found matching device at %d:%d:%d "
		       "vendor:0x%04x device:0x%04x\n name:%s\n",
		       pci_dev->bus, pci_dev->dev, pci_dev->func,
		       pci_dev->vendor_id, pci_dev->device_id,
		       pci_device_get_device_name(pci_dev));

		if (pci_device_has_non_null_driver(pci_dev)) {
			printf("Device has kernel driver, skipping...\n");
			continue;
		}

		pci_device_probe(pci_dev);

		dev = rte_malloc(NULL, sizeof(*dev), 0);
		if (dev == NULL) {
			printf("Failed to allocate device struct\n");
			err = -1;
			goto cleanup;
		}

		dev->ioat = ioat_attach(pci_dev);
		if (dev->ioat == NULL) {
			rte_free(dev);
			/* Likely no device found. */
			err = -1;
			goto cleanup;
		}

		TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
	}

cleanup:
	pci_iterator_destroy(iter);
	if (err != 0) {
		ioat_exit();
	}
	/* Bug fix: propagate the error instead of always returning 0. */
	return err;
}
/* Print command-line help for program_name. */
static void
usage(char *program_name)
{
	printf("%s options\n", program_name);
	printf("\t[-h help message]\n");
	printf("\t[-c core mask for distributing I/O submission/completion work]\n");
	printf("\t[-q queue depth]\n");
	printf("\t[-s transfer size in bytes]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-v verify copy result if this switch is on]\n");
}

/*
 * Parse command-line arguments into g_user_config.
 * Returns 0 on success, 1 on bad arguments; exits directly for -h.
 */
static int
parse_args(int argc, char **argv)
{
	int op;

	construct_user_config(&g_user_config);

	for (;;) {
		op = getopt(argc, argv, "c:hq:s:t:v");
		if (op == -1) {
			break;
		}

		switch (op) {
		case 'c':
			g_user_config.core_mask = optarg;
			break;
		case 'q':
			g_user_config.queue_depth = atoi(optarg);
			break;
		case 's':
			g_user_config.xfer_size_bytes = atoi(optarg);
			break;
		case 't':
			g_user_config.time_in_sec = atoi(optarg);
			break;
		case 'v':
			g_user_config.verify = true;
			break;
		case 'h':
			usage(argv[0]);
			exit(0);
		default:
			usage(argv[0]);
			return 1;
		}
	}

	/* All numeric options must be non-zero and a core mask must be set. */
	if (!g_user_config.xfer_size_bytes || !g_user_config.queue_depth ||
	    !g_user_config.time_in_sec || !g_user_config.core_mask) {
		usage(argv[0]);
		return 1;
	}

	/* Reset getopt state in case the caller parses again. */
	optind = 1;
	return 0;
}
/* Poll for completions until no transfers remain in flight. */
static void
drain_io(struct thread_entry *thread_entry)
{
	while (thread_entry->current_queue_depth != 0) {
		ioat_process_events();
	}
}

/* Queue one copy of g_user_config.xfer_size_bytes from src to dst. */
static void
submit_single_xfer(struct thread_entry *thread_entry, struct ioat_task *ioat_task, void *dst,
		   void *src)
{
	ioat_task->thread_entry = thread_entry;
	ioat_task->src = src;
	ioat_task->dst = dst;

	ioat_submit_copy(ioat_task, ioat_done, dst, src, g_user_config.xfer_size_bytes);
	thread_entry->current_queue_depth++;
}
/*
 * Prime the channel with queue_depth transfers, drawing buffers and
 * task contexts from the per-thread mempools.
 */
static void
submit_xfers(struct thread_entry *thread_entry, uint64_t queue_depth)
{
	while (queue_depth-- > 0) {
		void *src = NULL, *dst = NULL;
		struct ioat_task *ioat_task = NULL;

		/* Bug fix: the pool can run dry; never submit NULL buffers. */
		if (rte_mempool_get(thread_entry->task_pool, (void **)&ioat_task) != 0 ||
		    rte_mempool_get(thread_entry->data_pool, &src) != 0 ||
		    rte_mempool_get(thread_entry->data_pool, &dst) != 0) {
			fprintf(stderr, "Failed to get element from memory pool\n");
			if (src) {
				rte_mempool_put(thread_entry->data_pool, src);
			}
			if (ioat_task) {
				rte_mempool_put(thread_entry->task_pool, ioat_task);
			}
			break;
		}

		submit_single_xfer(thread_entry, ioat_task, dst, src);
	}
}
/*
 * Per-lcore worker: create the data/task mempools, register with the
 * I/OAT driver, run the timed copy loop, then drain outstanding
 * transfers and unregister. Returns 0 on success (or when no channel is
 * available for this lcore), 1 on pool allocation failure.
 */
static int
work_fn(void *arg)
{
	char buf_pool_name[20], task_pool_name[20];
	uint64_t tsc_end;
	struct thread_entry *t = (struct thread_entry *)arg;

	t->lcore_id = rte_lcore_id();

	/* Mempool names must be unique per lcore. */
	snprintf(buf_pool_name, sizeof(buf_pool_name), "buf_pool_%d", rte_lcore_id());
	snprintf(task_pool_name, sizeof(task_pool_name), "task_pool_%d", rte_lcore_id());
	t->data_pool = rte_mempool_create(buf_pool_name, 512, g_user_config.xfer_size_bytes, 0, 0, NULL,
					  NULL,
					  NULL, NULL, SOCKET_ID_ANY, 0);
	t->task_pool = rte_mempool_create(task_pool_name, 512, sizeof(struct ioat_task), 0, 0, NULL, NULL,
					  NULL, NULL, SOCKET_ID_ANY, 0);
	if (!t->data_pool || !t->task_pool) {
		fprintf(stderr, "Could not allocate buffer pool.\n");
		return 1;
	}

	if (ioat_register_thread() != 0) {
		/* Not fatal: this lcore simply contributes no I/O. */
		fprintf(stderr, "lcore %u: No ioat channels found. Check that ioatdma driver is unloaded.\n",
			rte_lcore_id());
		return 0;
	}

	tsc_end = rte_get_timer_cycles() + g_user_config.time_in_sec * rte_get_timer_hz();

	/* Fill the queue once; ioat_done() keeps it full after that. */
	submit_xfers(t, g_user_config.queue_depth);
	while (rte_get_timer_cycles() < tsc_end) {
		ioat_process_events();
	}

	/* Stop resubmitting and wait for in-flight transfers to finish. */
	t->is_draining = true;
	drain_io(t);
	ioat_unregister_thread();

	return 0;
}
/*
 * Initialize the DPDK EAL (restricted to g_user_config.core_mask, with
 * PCI scanning disabled) and then probe and attach the I/OAT channels.
 * Returns 0 on success, 1 on failure.
 */
static int
init(void)
{
	char *core_mask_conf;

	core_mask_conf = sprintf_alloc("-c %s", g_user_config.core_mask);
	if (!core_mask_conf) {
		return 1;
	}

	/* NOTE(review): assumes rte_eal_init() does not keep argv pointers
	 * past return — core_mask_conf is freed immediately below; confirm. */
	char *ealargs[] = {"perf", core_mask_conf, "-n 4", "--no-pci"};

	if (rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]), ealargs) < 0) {
		free(core_mask_conf);
		fprintf(stderr, "Could not init eal\n");
		return 1;
	}

	free(core_mask_conf);

	if (ioat_init() != 0) {
		fprintf(stderr, "Could not init ioat\n");
		return 1;
	}

	return 0;
}
/*
 * Print per-lcore and aggregate transfer statistics.
 * Returns 1 if any transfer failed verification, else 0.
 */
static int
dump_result(struct thread_entry *threads, int len)
{
	int i;
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;
	uint64_t total_xfer_per_sec, total_bw_in_MBps;

	printf("lcore Transfers Bandwidth Failed\n");
	printf("--------------------------------------------\n");
	for (i = 0; i < len; i++) {
		struct thread_entry *t = &threads[i];
		uint64_t xfer_per_sec, bw_in_MBps;

		xfer_per_sec = t->xfer_completed / g_user_config.time_in_sec;
		bw_in_MBps = (t->xfer_completed * g_user_config.xfer_size_bytes) /
			     (g_user_config.time_in_sec * 1024 * 1024);

		total_completed += t->xfer_completed;
		total_failed += t->xfer_failed;

		/* Only print rows for lcores that actually moved data. */
		if (xfer_per_sec) {
			printf("%5d %10" PRIu64 "/s %10" PRIu64 " MB/s %6" PRIu64 "\n",
			       t->lcore_id, xfer_per_sec, bw_in_MBps, t->xfer_failed);
		}
	}

	total_xfer_per_sec = total_completed / g_user_config.time_in_sec;
	total_bw_in_MBps = (total_completed * g_user_config.xfer_size_bytes) /
			   (g_user_config.time_in_sec * 1024 * 1024);

	printf("============================================\n");
	printf("Total: %10" PRIu64 "/s %10" PRIu64 " MB/s %6" PRIu64 "\n",
	       total_xfer_per_sec, total_bw_in_MBps, total_failed);

	return total_failed ? 1 : 0;
}
/*
 * Entry point: parse arguments, bring up EAL and I/OAT, launch work_fn
 * on every worker lcore plus the master, wait for completion, and print
 * the combined results.
 */
int
main(int argc, char **argv)
{
	unsigned lcore_id;
	struct thread_entry threads[RTE_MAX_LCORE] = {};

	if (parse_args(argc, argv) != 0) {
		return 1;
	}

	if (init() != 0) {
		return 1;
	}

	dump_user_config(&g_user_config);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(work_fn, &threads[lcore_id], lcore_id);
	}

	/* The master lcore also participates in the workload. */
	if (work_fn(&threads[rte_get_master_lcore()]) != 0) {
		return 1;
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) != 0) {
			return 1;
		}
	}

	return dump_result(threads, RTE_MAX_LCORE);
}

1
examples/ioat/verify/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
verify

View File

@ -0,0 +1,57 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(CURDIR)/../../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
APP = verify
C_SRCS := verify.c
CFLAGS += -I. $(DPDK_INC)
SPDK_LIBS += $(SPDK_ROOT_DIR)/lib/ioat/libspdk_ioat.a \
$(SPDK_ROOT_DIR)/lib/util/libspdk_util.a \
$(SPDK_ROOT_DIR)/lib/memory/libspdk_memory.a
LIBS += $(SPDK_LIBS) -lpciaccess -lpthread $(DPDK_LIB) -lrt
all: $(APP)
$(APP): $(OBJS) $(SPDK_LIBS)
$(LINK_C)
clean:
$(CLEAN_C) $(APP)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

View File

@ -0,0 +1,439 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include <pciaccess.h>

#include "spdk/ioat.h"
#include "spdk/pci.h"
#include "spdk/string.h"
/* Size of the shared, pattern-filled source buffer (512 KB). */
#define SRC_BUFFER_SIZE (512*1024)

/* Runtime options supplied on the command line (see parse_args()). */
struct user_config {
	int queue_depth;	/* outstanding transfers per lcore */
	int time_in_sec;	/* test duration */
	char *core_mask;	/* EAL core mask string, e.g. "0x1" */
};

/* One attached I/OAT channel; linked into g_devices. */
struct ioat_device {
	struct ioat_channel *ioat;
	TAILQ_ENTRY(ioat_device) tailq;
};

static TAILQ_HEAD(, ioat_device) g_devices;	/* all attached channels */
static struct user_config g_user_config;

/* Per-lcore statistics and resources. */
struct thread_entry {
	uint64_t xfer_completed;	/* copies whose verification passed */
	uint64_t xfer_failed;		/* copies whose verification failed */
	uint64_t current_queue_depth;	/* transfers currently in flight */
	unsigned lcore_id;
	bool is_draining;		/* set when the timed run ends */
	struct rte_mempool *data_pool;	/* destination buffers */
	struct rte_mempool *task_pool;	/* struct ioat_task objects */
};

/* Context for one randomized copy request. */
struct ioat_task {
	struct thread_entry *thread_entry;
	void *buffer;	/* SRC_BUFFER_SIZE-byte destination buffer */
	int len;	/* bytes to copy this round */
	void *src;	/* slice of g_src */
	void *dst;	/* offset within buffer */
};

/* Per-thread PRNG state for rand_r(). */
static __thread unsigned int seed = 0;

static unsigned char *g_src;	/* shared source pattern buffer */

static void submit_single_xfer(struct ioat_task *ioat_task);
static void
construct_user_config(struct user_config *self)
{
self->queue_depth = 32;
self->time_in_sec = 10;
self->core_mask = "0x1";
}
static void
dump_user_config(struct user_config *self)
{
printf("User configuration:\n");
printf("Run time: %u seconds\n", self->time_in_sec);
printf("Core mask: %s\n", self->core_mask);
printf("Queue depth: %u\n", self->queue_depth);
}
/* Detach every attached channel and release its bookkeeping struct. */
static void
ioat_exit(void)
{
	struct ioat_device *dev;

	dev = TAILQ_FIRST(&g_devices);
	while (dev != NULL) {
		TAILQ_REMOVE(&g_devices, dev, tailq);
		ioat_detach(dev->ioat);
		rte_free(dev);
		dev = TAILQ_FIRST(&g_devices);
	}
}
/*
 * Set up ioat_task with a randomized copy: a random slice of the shared
 * source pattern buffer (g_src) into a random offset of the task's own
 * destination buffer.
 */
static void prepare_ioat_task(struct thread_entry *thread_entry, struct ioat_task *ioat_task)
{
	int len;
	int src_offset;
	int dst_offset;

	/* Ranges are chosen so every access stays in bounds:
	 * src_offset < SIZE, len < SIZE - src_offset, dst_offset <= SIZE - len. */
	src_offset = rand_r(&seed) % SRC_BUFFER_SIZE;
	len = rand_r(&seed) % (SRC_BUFFER_SIZE - src_offset);
	dst_offset = rand_r(&seed) % (SRC_BUFFER_SIZE - len);

	memset(ioat_task->buffer, 0, SRC_BUFFER_SIZE);
	ioat_task->len = len;
	ioat_task->src = g_src + src_offset;
	/* Portability fix: arithmetic on void * is a GNU extension, not ISO C. */
	ioat_task->dst = (unsigned char *)ioat_task->buffer + dst_offset;
	ioat_task->thread_entry = thread_entry;
}
/*
 * Completion callback: verify the copied bytes against the source slice,
 * update statistics, and either submit a fresh randomized copy or, when
 * draining, return the task's resources to their mempools.
 */
static void
ioat_done(void *cb_arg)
{
	struct ioat_task *task = cb_arg;
	struct thread_entry *thread = task->thread_entry;

	if (memcmp(task->src, task->dst, task->len) == 0) {
		thread->xfer_completed++;
	} else {
		thread->xfer_failed++;
	}

	thread->current_queue_depth--;

	if (!thread->is_draining) {
		prepare_ioat_task(thread, task);
		submit_single_xfer(task);
	} else {
		rte_mempool_put(thread->data_pool, task->buffer);
		rte_mempool_put(thread->task_pool, task);
	}
}
/*
 * Enumerate PCI devices of class 0x0880 (generic system peripheral /
 * DMA controller), attach each supported I/OAT channel, and add it to
 * g_devices.
 *
 * Returns 0 on success, -1 on failure (channels attached before the
 * failure are detached again).
 */
static int
ioat_init(void)
{
	struct pci_device_iterator *iter;
	struct pci_device *pci_dev;
	int err = 0;
	struct pci_id_match match;
	struct ioat_device *dev;

	pci_system_init();
	TAILQ_INIT(&g_devices);

	/* Zero first so fields not set below (e.g. match_data) are defined. */
	memset(&match, 0, sizeof(match));
	match.vendor_id = PCI_MATCH_ANY;
	match.subvendor_id = PCI_MATCH_ANY;
	match.subdevice_id = PCI_MATCH_ANY;
	match.device_id = PCI_MATCH_ANY;
	match.device_class = 0x088000;
	match.device_class_mask = 0xFFFFFF;

	iter = pci_id_match_iterator_create(&match);
	if (iter == NULL) {
		return -1;
	}

	while ((pci_dev = pci_device_next(iter)) != NULL) {
		/* Check if the PCI device is a supported I/OAT channel. */
		if (!(ioat_pci_device_match_id(pci_dev->vendor_id,
					       pci_dev->device_id))) {
			continue;
		}

		printf(" Found matching device at %d:%d:%d "
		       "vendor:0x%04x device:0x%04x\n name:%s\n",
		       pci_dev->bus, pci_dev->dev, pci_dev->func,
		       pci_dev->vendor_id, pci_dev->device_id,
		       pci_device_get_device_name(pci_dev));

		if (pci_device_has_non_null_driver(pci_dev)) {
			printf("Device has kernel driver, skipping...\n");
			continue;
		}

		pci_device_probe(pci_dev);

		/* calloc() zero-initializes, replacing malloc()+memset(). */
		dev = calloc(1, sizeof(*dev));
		if (dev == NULL) {
			printf("Failed to allocate device struct\n");
			err = -1;
			goto cleanup;
		}

		dev->ioat = ioat_attach(pci_dev);
		if (dev->ioat == NULL) {
			free(dev);
			/* Likely no device found. */
			err = -1;
			goto cleanup;
		}

		TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
	}

cleanup:
	pci_iterator_destroy(iter);
	if (err != 0) {
		ioat_exit();
	}
	/* Bug fix: propagate the error instead of always returning 0. */
	return err;
}
/* Print command-line help for program_name. */
static void
usage(char *program_name)
{
	printf("%s options\n", program_name);
	printf("\t[-h help message]\n");
	printf("\t[-c core mask for distributing I/O submission/completion work]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-q queue depth]\n");
}

/*
 * Parse command-line arguments into g_user_config.
 * Returns 0 on success, 1 on bad arguments; exits directly for -h.
 */
static int
parse_args(int argc, char **argv)
{
	int op;

	construct_user_config(&g_user_config);

	for (;;) {
		op = getopt(argc, argv, "c:ht:q:");
		if (op == -1) {
			break;
		}

		switch (op) {
		case 'c':
			g_user_config.core_mask = optarg;
			break;
		case 'q':
			g_user_config.queue_depth = atoi(optarg);
			break;
		case 't':
			g_user_config.time_in_sec = atoi(optarg);
			break;
		case 'h':
			usage(argv[0]);
			exit(0);
		default:
			usage(argv[0]);
			return 1;
		}
	}

	/* All options must be non-zero/non-empty. */
	if (!g_user_config.time_in_sec || !g_user_config.core_mask || !g_user_config.queue_depth) {
		usage(argv[0]);
		return 1;
	}

	/* Reset getopt state in case the caller parses again. */
	optind = 1;
	return 0;
}
/* Poll for completions until no transfers remain in flight. */
static void
drain_xfers(struct thread_entry *thread_entry)
{
	while (thread_entry->current_queue_depth != 0) {
		ioat_process_events();
	}
}

/* Queue the copy described by ioat_task and account for it. */
static void
submit_single_xfer(struct ioat_task *ioat_task)
{
	ioat_submit_copy(ioat_task, ioat_done, ioat_task->dst, ioat_task->src, ioat_task->len);
	ioat_task->thread_entry->current_queue_depth++;
}
/*
 * Prime the channel with queue_depth randomized transfers, drawing task
 * contexts and destination buffers from the per-thread mempools.
 */
static void
submit_xfers(struct thread_entry *thread_entry, uint64_t queue_depth)
{
	while (queue_depth-- > 0) {
		struct ioat_task *ioat_task = NULL;

		/* Bug fix: check rte_mempool_get() results before dereferencing. */
		if (rte_mempool_get(thread_entry->task_pool, (void **)&ioat_task) != 0) {
			fprintf(stderr, "Failed to get element from memory pool\n");
			break;
		}
		if (rte_mempool_get(thread_entry->data_pool, &(ioat_task->buffer)) != 0) {
			fprintf(stderr, "Failed to get element from memory pool\n");
			rte_mempool_put(thread_entry->task_pool, ioat_task);
			break;
		}

		prepare_ioat_task(thread_entry, ioat_task);
		submit_single_xfer(ioat_task);
	}
}
/*
 * Per-lcore worker: create the buffer/task mempools (sized to the queue
 * depth), register with the I/OAT driver, run the timed randomized-copy
 * loop, then drain and unregister. Returns 0 on success (or when no
 * channel is available for this lcore), 1 on pool allocation failure.
 */
static int
work_fn(void *arg)
{
	uint64_t tsc_end;
	char buf_pool_name[20], task_pool_name[20];
	struct thread_entry *t = (struct thread_entry *)arg;

	t->lcore_id = rte_lcore_id();

	/* Mempool names must be unique per lcore. */
	snprintf(buf_pool_name, sizeof(buf_pool_name), "buf_pool_%d", rte_lcore_id());
	snprintf(task_pool_name, sizeof(task_pool_name), "task_pool_%d", rte_lcore_id());
	t->data_pool = rte_mempool_create(buf_pool_name, g_user_config.queue_depth, SRC_BUFFER_SIZE, 0, 0,
					  NULL, NULL,
					  NULL, NULL, SOCKET_ID_ANY, 0);
	t->task_pool = rte_mempool_create(task_pool_name, g_user_config.queue_depth,
					  sizeof(struct ioat_task), 0, 0, NULL, NULL,
					  NULL, NULL, SOCKET_ID_ANY, 0);
	if (!t->data_pool || !t->task_pool) {
		fprintf(stderr, "Could not allocate buffer pool.\n");
		return 1;
	}

	if (ioat_register_thread() != 0) {
		/* Not fatal: this lcore simply contributes no I/O. */
		fprintf(stderr, "lcore %u: No ioat channels found. Check that ioatdma driver is unloaded.\n",
			rte_lcore_id());
		return 0;
	}

	tsc_end = rte_get_timer_cycles() + g_user_config.time_in_sec * rte_get_timer_hz();

	/* Fill the queue once; ioat_done() keeps it full after that. */
	submit_xfers(t, g_user_config.queue_depth);
	while (rte_get_timer_cycles() < tsc_end) {
		ioat_process_events();
	}

	/* Stop resubmitting and wait for in-flight transfers to finish. */
	t->is_draining = true;
	drain_xfers(t);
	ioat_unregister_thread();

	return 0;
}
/*
 * Allocate the 512 KB shared source buffer (512-byte aligned) and fill
 * it with a deterministic pattern: each 4-byte group is set to the low
 * byte of its group index. Returns 0 on success, -1 on failure.
 */
static int
init_src_buffer(void)
{
	int i;

	g_src = rte_malloc(NULL, SRC_BUFFER_SIZE, 512);
	if (g_src == NULL) {
		fprintf(stderr, "Allocate src buffer failed\n");
		return -1;
	}

	for (i = 0; i < SRC_BUFFER_SIZE / 4; i++) {
		memset(&g_src[4 * i], i, 4);
	}

	return 0;
}
/*
 * Initialize the DPDK EAL (restricted to g_user_config.core_mask, with
 * PCI scanning disabled), allocate the shared source buffer, and attach
 * the I/OAT channels. Returns 0 on success, 1 on failure.
 */
static int
init(void)
{
	char *core_mask_conf;

	core_mask_conf = sprintf_alloc("-c %s", g_user_config.core_mask);
	if (!core_mask_conf) {
		return 1;
	}

	/* NOTE(review): assumes rte_eal_init() does not keep argv pointers
	 * past return — core_mask_conf is freed immediately below; confirm. */
	char *ealargs[] = {"verify", core_mask_conf, "-n 4", "--no-pci"};

	if (rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]), ealargs) < 0) {
		free(core_mask_conf);
		fprintf(stderr, "Could not init eal\n");
		return 1;
	}

	free(core_mask_conf);

	if (init_src_buffer() != 0) {
		fprintf(stderr, "Could not init src buffer\n");
		return 1;
	}

	if (ioat_init() != 0) {
		fprintf(stderr, "Could not init ioat\n");
		return 1;
	}

	return 0;
}
/*
 * Print per-lcore success/failure counts and return 1 if any transfer
 * failed verification, else 0.
 */
static int
dump_result(struct thread_entry *threads, int len)
{
	int i;
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;

	for (i = 0; i < len; i++) {
		struct thread_entry *t = &threads[i];

		total_completed += t->xfer_completed;
		total_failed += t->xfer_failed;
		if (t->xfer_completed || t->xfer_failed) {
			/* Bug fix: %ld with uint64_t arguments is undefined
			 * behavior on ILP32 targets; use PRIu64 instead. */
			printf("lcore = %d, success = %" PRIu64 ", failed = %" PRIu64 " \n",
			       t->lcore_id, t->xfer_completed, t->xfer_failed);
		}
	}

	return total_failed ? 1 : 0;
}
/*
 * Entry point: parse arguments, bring up EAL, the source buffer, and
 * the I/OAT channels, run work_fn on every lcore, then free the source
 * buffer and report results.
 */
int
main(int argc, char **argv)
{
	unsigned lcore_id;
	struct thread_entry threads[RTE_MAX_LCORE] = {};

	if (parse_args(argc, argv) != 0) {
		return 1;
	}

	if (init() != 0) {
		return 1;
	}

	dump_user_config(&g_user_config);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(work_fn, &threads[lcore_id], lcore_id);
	}

	/* The master lcore also participates in the workload. */
	if (work_fn(&threads[rte_get_master_lcore()]) != 0) {
		return 1;
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) != 0) {
			return 1;
		}
	}

	rte_free(g_src);

	return dump_result(threads, RTE_MAX_LCORE);
}

101
include/spdk/ioat.h Normal file
View File

@ -0,0 +1,101 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* This file defines the public interface to the I/OAT DMA engine driver.
*/
#ifndef __IOAT_H__
#define __IOAT_H__
#include <inttypes.h>
#include <stdbool.h>
/**
* Signature for callback function invoked when a request is completed.
*/
typedef void (*ioat_callback_t)(void *arg);
/**
* Returns true if vendor_id and device_id match a known IOAT PCI device ID.
*/
bool ioat_pci_device_match_id(uint16_t vendor_id, uint16_t device_id);
/**
* Attach an I/OAT PCI device to the I/OAT userspace driver.
*
 * To stop using the device and release its associated resources,
* call \ref ioat_detach with the ioat_channel instance returned by this function.
*/
struct ioat_channel *ioat_attach(void *device);
/**
* Detaches specified device returned by \ref ioat_attach() from the I/OAT driver.
*/
int ioat_detach(struct ioat_channel *ioat);
/**
* Request a DMA engine channel for the calling thread.
*
* Must be called before submitting any requests from a thread.
*
* The \ref ioat_unregister_thread() function can be called to release the channel.
*/
int ioat_register_thread(void);
/**
* Unregister the current thread's I/OAT channel.
*
* This function can be called after \ref ioat_register_thread() to release the thread's
* DMA engine channel for use by other threads.
*/
void ioat_unregister_thread(void);
/**
* Submit a DMA engine memory copy request.
*
* Before submitting any requests on a thread, the thread must be registered
* using the \ref ioat_register_thread() function.
*/
int64_t ioat_submit_copy(void *cb_arg, ioat_callback_t cb_fn,
void *dst, const void *src, uint64_t nbytes);
/**
* Check for completed requests on the current thread.
*
* Before submitting any requests on a thread, the thread must be registered
* using the \ref ioat_register_thread() function.
*/
void ioat_process_events(void);
#endif

268
include/spdk/ioat_spec.h Normal file
View File

@ -0,0 +1,268 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IOAT_SPEC_H__
#define __IOAT_SPEC_H__
#include <inttypes.h>
#define IOAT_INTRCTRL_MASTER_INT_EN 0x01
#define IOAT_VER_3_0 0x30
#define IOAT_VER_3_3 0x33
/* DMA Channel Registers */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
#define IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
/* I/OAT MMIO register layout, mapped from PCI BAR 0.
 * Offsets in the comments are relative to the start of the BAR. */
struct ioat_registers {
	uint8_t		chancnt;		/* 0x00 */
	uint8_t		xfercap;		/* 0x01 */
	uint8_t		genctrl;		/* 0x02 */
	uint8_t		intrctrl;		/* 0x03 */
	uint32_t	attnstatus;		/* 0x04 */
	uint8_t		cbver;		/* 0x08 */
	uint8_t		reserved4[0x3];	/* 0x09 */
	uint16_t	intrdelay;	/* 0x0C */
	uint16_t	cs_status;	/* 0x0E */
	uint32_t	dmacapability;	/* 0x10 */
	uint8_t		reserved5[0x6C];	/* 0x14 */
	uint16_t	chanctrl;	/* 0x80 */
	uint8_t		reserved6[0x2];	/* 0x82 */
	uint8_t		chancmd;	/* 0x84 */
	uint8_t		reserved3[1];	/* 0x85 */
	uint16_t	dmacount;	/* 0x86 */
	uint64_t	chansts;	/* 0x88 */
	uint64_t	chainaddr;	/* 0x90 */
	uint64_t	chancmp;	/* 0x98 */
	uint8_t		reserved2[0x8];	/* 0xA0 */
	uint32_t	chanerr;	/* 0xA8 */
	uint32_t	chanerrmask;	/* 0xAC */
} __attribute__((packed));
#define IOAT_CHANCMD_RESET 0x20
#define IOAT_CHANCMD_SUSPEND 0x04
#define IOAT_CHANSTS_STATUS 0x7ULL
#define IOAT_CHANSTS_ACTIVE 0x0
#define IOAT_CHANSTS_IDLE 0x1
#define IOAT_CHANSTS_SUSPENDED 0x2
#define IOAT_CHANSTS_HALTED 0x3
#define IOAT_CHANSTS_ARMED 0x4
#define IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
#define IOAT_CHANSTS_SOFT_ERROR 0x10ULL
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
#define IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
struct ioat_dma_hw_descriptor {
uint32_t size;
union {
uint32_t control_raw;
struct {
uint32_t int_enable: 1;
uint32_t src_snoop_disable: 1;
uint32_t dest_snoop_disable: 1;
uint32_t completion_update: 1;
uint32_t fence: 1;
uint32_t null: 1;
uint32_t src_page_break: 1;
uint32_t dest_page_break: 1;
uint32_t bundle: 1;
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t reserved: 13;
#define IOAT_OP_COPY 0x00
uint32_t op: 8;
} control;
} u;
uint64_t src_addr;
uint64_t dest_addr;
uint64_t next;
uint64_t reserved;
uint64_t reserved2;
uint64_t user1;
uint64_t user2;
};
struct ioat_fill_hw_descriptor {
uint32_t size;
union {
uint32_t control_raw;
struct {
uint32_t int_enable: 1;
uint32_t reserved: 1;
uint32_t dest_snoop_disable: 1;
uint32_t completion_update: 1;
uint32_t fence: 1;
uint32_t reserved2: 2;
uint32_t dest_page_break: 1;
uint32_t bundle: 1;
uint32_t reserved3: 15;
#define IOAT_OP_FILL 0x01
uint32_t op: 8;
} control;
} u;
uint64_t src_data;
uint64_t dest_addr;
uint64_t next;
uint64_t reserved;
uint64_t next_dest_addr;
uint64_t user1;
uint64_t user2;
};
struct ioat_xor_hw_descriptor {
uint32_t size;
union {
uint32_t control_raw;
struct {
uint32_t int_enable: 1;
uint32_t src_snoop_disable: 1;
uint32_t dest_snoop_disable: 1;
uint32_t completion_update: 1;
uint32_t fence: 1;
uint32_t src_count: 3;
uint32_t bundle: 1;
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t reserved: 13;
#define IOAT_OP_XOR 0x87
#define IOAT_OP_XOR_VAL 0x88
uint32_t op: 8;
} control;
} u;
uint64_t src_addr;
uint64_t dest_addr;
uint64_t next;
uint64_t src_addr2;
uint64_t src_addr3;
uint64_t src_addr4;
uint64_t src_addr5;
};
struct ioat_xor_ext_hw_descriptor {
uint64_t src_addr6;
uint64_t src_addr7;
uint64_t src_addr8;
uint64_t next;
uint64_t reserved[4];
};
struct ioat_pq_hw_descriptor {
uint32_t size;
union {
uint32_t control_raw;
struct {
uint32_t int_enable: 1;
uint32_t src_snoop_disable: 1;
uint32_t dest_snoop_disable: 1;
uint32_t completion_update: 1;
uint32_t fence: 1;
uint32_t src_count: 3;
uint32_t bundle: 1;
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t p_disable: 1;
uint32_t q_disable: 1;
uint32_t reserved: 11;
#define IOAT_OP_PQ 0x89
#define IOAT_OP_PQ_VAL 0x8a
uint32_t op: 8;
} control;
} u;
uint64_t src_addr;
uint64_t p_addr;
uint64_t next;
uint64_t src_addr2;
uint64_t src_addr3;
uint8_t coef[8];
uint64_t q_addr;
};
struct ioat_pq_ext_hw_descriptor {
uint64_t src_addr4;
uint64_t src_addr5;
uint64_t src_addr6;
uint64_t next;
uint64_t src_addr7;
uint64_t src_addr8;
uint64_t reserved[2];
};
struct ioat_pq_update_hw_descriptor {
uint32_t size;
union {
uint32_t control_raw;
struct {
uint32_t int_enable: 1;
uint32_t src_snoop_disable: 1;
uint32_t dest_snoop_disable: 1;
uint32_t completion_update: 1;
uint32_t fence: 1;
uint32_t src_cnt: 3;
uint32_t bundle: 1;
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t p_disable: 1;
uint32_t q_disable: 1;
uint32_t reserved: 3;
uint32_t coef: 8;
#define IOAT_OP_PQ_UP 0x8b
uint32_t op: 8;
} control;
} u;
uint64_t src_addr;
uint64_t p_addr;
uint64_t next;
uint64_t src_addr2;
uint64_t p_src;
uint64_t q_src;
uint64_t q_addr;
};
struct ioat_raw_hw_descriptor {
uint64_t field[8];
};
#endif /* __IOAT_SPEC_H__ */

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(CURDIR)/.. SPDK_ROOT_DIR := $(CURDIR)/..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y += memory util nvme DIRS-y += memory util nvme ioat
.PHONY: all clean $(DIRS-y) .PHONY: all clean $(DIRS-y)

51
lib/ioat/Makefile Normal file
View File

@ -0,0 +1,51 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(CURDIR)/../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
CFLAGS += $(DPDK_INC) -include $(CONFIG_IOAT_IMPL)
C_SRCS = ioat.c
LIB = libspdk_ioat.a
all: $(LIB)
clean:
$(CLEAN_C)
$(LIB): $(OBJS)
$(LIB_C)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

738
lib/ioat/ioat.c Normal file
View File

@ -0,0 +1,738 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ioat_internal.h"
#include "ioat_pci.h"
/** List of channels that have been attached but are not yet assigned to a thread.
*
* Must hold g_ioat_driver.lock while manipulating this list.
*/
static SLIST_HEAD(, ioat_channel) ioat_free_channels;
/** IOAT channel assigned to this thread (or NULL if not assigned yet). */
static __thread struct ioat_channel *ioat_thread_channel;
struct ioat_driver {
ioat_mutex_t lock;
};
static struct ioat_driver g_ioat_driver = {
.lock = IOAT_MUTEX_INITIALIZER,
};
struct pci_device_id {
uint16_t vendor;
uint16_t device;
};
static const struct pci_device_id ioat_pci_table[] = {
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3},
};
/* Return true if (vendor_id, device_id) matches an entry in the table of
 * known I/OAT PCI device IDs. */
bool
ioat_pci_device_match_id(uint16_t vendor_id, uint16_t device_id)
{
	size_t idx;

	for (idx = 0; idx < sizeof(ioat_pci_table) / sizeof(ioat_pci_table[0]); idx++) {
		const struct pci_device_id *entry = &ioat_pci_table[idx];

		if (entry->vendor == vendor_id && entry->device == device_id) {
			return true;
		}
	}

	return false;
}
/* Read a 64-bit MMIO register. On targets that cannot issue an atomic
 * 64-bit PCIe read (IOAT_64BIT_IO == 0), fall back to two 32-bit reads. */
static uint64_t
ioat_mmio_read_8(volatile uint64_t *addr)
{
	uint64_t val;
	volatile uint32_t *addr32 = (volatile uint32_t *)addr;

	if (IOAT_64BIT_IO) {
		val = *addr;
	} else {
		/* Must read lower 4 bytes before upper 4 bytes. */
		val = addr32[0];
		val |= (uint64_t)addr32[1] << 32;
	}

	return val;
}
/* Write a 64-bit MMIO register. On targets that cannot issue an atomic
 * 64-bit PCIe write (IOAT_64BIT_IO == 0), fall back to two 32-bit writes
 * (low dword first).
 * NOTE(review): assumes the device tolerates the split write for the
 * registers this is used on (CHANCMP/CHAINADDR) -- confirm vs spec. */
static void
ioat_mmio_write_8(volatile uint64_t *addr, uint64_t val)
{
	volatile uint32_t *addr32 = (volatile uint32_t *)addr;

	if (IOAT_64BIT_IO) {
		*addr = val;
	} else {
		addr32[0] = (uint32_t)val;
		addr32[1] = (uint32_t)(val >> 32);
	}
}
/* Read the 64-bit channel status register (CHANSTS). */
static uint64_t
ioat_get_chansts(struct ioat_channel *ioat)
{
	return ioat_mmio_read_8(&ioat->regs->chansts);
}
/* Program CHANCMP with the bus address of the completion write-back area. */
static void
ioat_write_chancmp(struct ioat_channel *ioat, uint64_t addr)
{
	ioat_mmio_write_8(&ioat->regs->chancmp, addr);
}
/* Program CHAINADDR with the bus address of the first descriptor in the chain. */
static void
ioat_write_chainaddr(struct ioat_channel *ioat, uint64_t addr)
{
	ioat_mmio_write_8(&ioat->regs->chainaddr, addr);
}
/* Request channel suspend; completion must be polled via CHANSTS. */
static inline void
ioat_suspend(struct ioat_channel *ioat)
{
	ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
}
/* Request channel reset; completion must be polled via ioat_reset_pending(). */
static inline void
ioat_reset(struct ioat_channel *ioat)
{
	ioat->regs->chancmd = IOAT_CHANCMD_RESET;
}
/* Nonzero while a previously requested channel reset is still in progress
 * (the RESET bit in CHANCMD reads back as set until the reset completes). */
static inline uint32_t
ioat_reset_pending(struct ioat_channel *ioat)
{
	return (ioat->regs->chancmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
/* Map PCI BAR 0 (the I/OAT MMIO register region) into this process and
 * record it in ioat->regs. Returns 0 on success, -1 on failure. */
static int
ioat_map_pci_bar(struct ioat_channel *ioat)
{
	int regs_bar, rc;
	void *addr;

	regs_bar = 0;
	rc = ioat_pcicfg_map_bar(ioat->device, regs_bar, 0, &addr);
	if (rc != 0 || addr == NULL) {
		ioat_printf(ioat, "%s: pci_device_map_range failed with error code %d\n",
			    __func__, rc);
		return -1;
	}

	ioat->regs = (volatile struct ioat_registers *)addr;

	return 0;
}
/* Undo ioat_map_pci_bar(). Returns 0 if nothing was mapped or the unmap
 * succeeded, otherwise the implementation's error code. */
static int
ioat_unmap_pci_bar(struct ioat_channel *ioat)
{
	void *mapped = (void *)ioat->regs;

	if (mapped == NULL) {
		return 0;
	}

	return ioat_pcicfg_unmap_bar(ioat->device, 0, mapped);
}
/* Number of descriptors submitted but not yet retired (ring occupancy).
 * head/tail are free-running counters, so mask to the power-of-two ring size. */
static inline uint32_t
ioat_get_active(struct ioat_channel *ioat)
{
	return (ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1);
}
/* Free descriptor slots. One slot is held back so head never wraps onto tail. */
static inline uint32_t
ioat_get_ring_space(struct ioat_channel *ioat)
{
	return (1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1;
}
/* Allocate one software descriptor plus its 64-byte-aligned, physically
 * addressable hardware descriptor. Returns NULL (with nothing leaked) if
 * either allocation fails or no physical address could be obtained. */
static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_channel *ioat)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	desc = malloc(sizeof(*desc));
	if (desc == NULL) {
		return NULL;
	}

	hw_desc = ioat_zmalloc(NULL, sizeof(*hw_desc), 64, &desc->hw_desc_bus_addr);
	if (hw_desc == NULL) {
		goto fail_desc;
	}
	if (desc->hw_desc_bus_addr == 0) {
		goto fail_hw_desc;
	}

	desc->u.dma = hw_desc;
	return desc;

fail_hw_desc:
	ioat_free(hw_desc);
fail_desc:
	free(desc);
	return NULL;
}
/* Free a descriptor allocated by ioat_alloc_ring_entry(); NULL is a no-op. */
static void
ioat_free_ring_entry(struct ioat_channel *ioat, struct ioat_descriptor *desc)
{
	if (desc == NULL) {
		return;
	}

	ioat_free(desc->u.dma);
	free(desc);
}
/* Translate a free-running ring index (head or tail) into its descriptor. */
static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_channel *ioat, uint32_t index)
{
	return ioat->ring[index % (1 << ioat->ring_size_order)];
}
/* Claim the current head slot; the doorbell is written later by ioat_flush(). */
static void
ioat_submit_single(struct ioat_channel *ioat)
{
	ioat->head++;
}
/* Ring the doorbell: publish the new descriptor count (DMACOUNT) to the device. */
static void
ioat_flush(struct ioat_channel *ioat)
{
	ioat->regs->dmacount = (uint16_t)ioat->head;
}
/* Queue a no-op (null) descriptor, used during channel bring-up to prove
 * the channel can fetch and complete descriptors. The doorbell is NOT
 * written; the caller must follow with ioat_flush().
 * Returns the descriptor, or NULL if the ring is full. */
static struct ioat_descriptor *
ioat_prep_null(struct ioat_channel *ioat)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.op = IOAT_OP_COPY;
	hw_desc->u.control.null = 1;
	hw_desc->u.control.completion_update = 1;

	/* size is set to 8 even though no data moves -- presumably a nonzero
	 * size is required by the hardware; TODO confirm vs spec. */
	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;

	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}
/* Fill the next ring slot with a DMA copy descriptor. dst/src are BUS
 * (physical) addresses; len must not exceed ioat->max_xfer_size. The
 * doorbell is NOT written; callers batch and then call ioat_flush().
 * Returns the descriptor, or NULL if no ring space is available. */
static struct ioat_descriptor *
ioat_prep_copy(struct ioat_channel *ioat, uint64_t dst,
	       uint64_t src, uint32_t len)
{
	struct ioat_descriptor *desc;
	struct ioat_dma_hw_descriptor *hw_desc;

	ioat_assert(len <= ioat->max_xfer_size);

	if (ioat_get_ring_space(ioat) < 1) {
		return NULL;
	}

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.dma;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control.op = IOAT_OP_COPY;
	/* Ask the device to write back completion status for this descriptor. */
	hw_desc->u.control.completion_update = 1;

	hw_desc->size = len;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	/* Callback is attached later (on the final descriptor of a request). */
	desc->callback_fn = NULL;
	desc->callback_arg = NULL;

	ioat_submit_single(ioat);

	return desc;
}
/* Quiesce and reset the channel: suspend it if it is running, wait (up to
 * 20 ms) for it to leave the active/idle states, clear latched errors,
 * then issue a reset and wait (up to 20 ms) for it to complete.
 * Returns 0 on success, -1 on timeout. */
static int ioat_reset_hw(struct ioat_channel *ioat)
{
	int timeout;
	uint64_t status;
	uint32_t chanerr;

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_suspend(ioat);
	}

	timeout = 20; /* in milliseconds */
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		ioat_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			ioat_printf(ioat, "%s: timed out waiting for suspend\n", __func__);
			return -1;
		}
		status = ioat_get_chansts(ioat);
	}

	/*
	 * Clear any outstanding errors.
	 * CHANERR is write-1-to-clear, so write the current CHANERR bits back to reset everything.
	 */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	ioat_reset(ioat);

	timeout = 20;
	while (ioat_reset_pending(ioat)) {
		ioat_delay_us(1000);
		timeout--;
		if (timeout == 0) {
			ioat_printf(ioat, "%s: timed out waiting for reset\n", __func__);
			return -1;
		}
	}

	return 0;
}
/* Poll the hardware's completion write-back and invoke the callback of
 * every descriptor completed since the last poll, advancing tail up to
 * and including the descriptor whose bus address the device reported. */
static void
ioat_process_channel_events(struct ioat_channel *ioat)
{
	struct ioat_descriptor *desc;
	uint64_t status, completed_descriptor;

	if (ioat->head == ioat->tail) {
		/* Nothing outstanding. */
		return;
	}

	/* comp_update is the CHANCMP area the device DMAs status into. */
	status = *ioat->comp_update;
	completed_descriptor = status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (is_ioat_halted(status)) {
		ioat_printf(ioat, "%s: Channel halted (%x)\n", __func__, ioat->regs->chanerr);
		/* TODO: report error */
		return;
	}

	if (completed_descriptor == ioat->last_seen) {
		/* No progress since the previous poll. */
		return;
	}

	/* Retire descriptors in order until we reach the reported one. */
	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);

		if (desc->callback_fn) {
			desc->callback_fn(desc->callback_arg);
		}

		ioat->tail++;

		if (desc->hw_desc_bus_addr == completed_descriptor)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;
}
/* Release per-channel resources: MMIO mapping, descriptor ring, and the
 * completion write-back area. Always returns 0. */
static int
ioat_channel_destruct(struct ioat_channel *ioat)
{
	ioat_unmap_pci_bar(ioat);

	if (ioat->ring) {
		int i;

		for (i = 0; i < (1 << ioat->ring_size_order); i++)
			ioat_free_ring_entry(ioat, ioat->ring[i]);
		free(ioat->ring);
	}

	if (ioat->comp_update) {
		/* Cast away volatile for the allocator's sake. */
		ioat_free((void *)ioat->comp_update);
		ioat->comp_update = NULL;
	}

	return 0;
}
/*
 * One-time hardware initialization for a channel: map the registers,
 * validate the device version (3.0+) and transfer capability, allocate
 * the completion write-back area and the descriptor ring, reset the
 * channel, and run a null descriptor to verify the channel goes idle.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
ioat_channel_start(struct ioat_channel *ioat)
{
	struct ioat_descriptor **ring;
	uint8_t xfercap, version;
	uint64_t status = 0;
	int i, num_descriptors;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	uint64_t comp_update_bus_addr;

	if (ioat_map_pci_bar(ioat) != 0) {
		ioat_printf(ioat, "%s: ioat_map_pci_bar() failed\n", __func__);
		return -1;
	}

	/* Only I/OAT v3.0 and newer hardware is supported. */
	version = ioat->regs->cbver;
	if (version < IOAT_VER_3_0) {
		ioat_printf(ioat, "%s: unsupported IOAT version %u.%u\n",
			    __func__, version >> 4, version & 0xF);
		return -1;
	}

	xfercap = ioat->regs->xfercap;

	/* Only bits [4:0] are valid. */
	xfercap &= 0x1f;
	if (xfercap == 0) {
		/* 0 means 4 GB max transfer size. */
		ioat->max_xfer_size = 1ULL << 32;
	} else if (xfercap < 12) {
		/* XFERCAP must be at least 12 (4 KB) according to the spec. */
		ioat_printf(ioat, "%s: invalid XFERCAP value %u\n", __func__, xfercap);
		return -1;
	} else {
		ioat->max_xfer_size = 1U << xfercap;
	}

	/* Completion write-back area: the device DMAs channel status here. */
	ioat->comp_update = ioat_zmalloc(NULL, sizeof(*ioat->comp_update), IOAT_CHANCMP_ALIGN,
					 &comp_update_bus_addr);
	if (ioat->comp_update == NULL) {
		return -1;
	}

	ioat->ring_size_order = IOAT_DEFAULT_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	ioat->ring = calloc(num_descriptors, sizeof(*ring));
	if (!ioat->ring) {
		ioat_channel_destruct(ioat);
		return -1;
	}

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat);
		if (!ring[i]) {
			ioat_channel_destruct(ioat);
			return -1;
		}
	}

	/* Link the hardware descriptors into a circular chain. */
	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;
		dma_hw_desc->next = next->hw_desc_bus_addr;
	}
	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;

	/* Bug fix: a failed reset was previously ignored, so the channel
	 * could be programmed while still active or halted. */
	if (ioat_reset_hw(ioat) != 0) {
		ioat_channel_destruct(ioat);
		return -1;
	}

	ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	ioat_write_chancmp(ioat, comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ring[0]->hw_desc_bus_addr);

	/* Kick a null descriptor and wait up to ~10 ms for the channel to
	 * complete it and report idle, proving the channel works. */
	ioat_prep_null(ioat);
	ioat_flush(ioat);

	i = 100;
	while (i-- > 0) {
		ioat_delay_us(100);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			break;
	}

	if (is_ioat_idle(status)) {
		ioat_process_channel_events(ioat);
	} else {
		ioat_printf(ioat, "%s: could not start channel: status = %p\n error = %#x\n",
			    __func__, (void *)status, ioat->regs->chanerr);
		ioat_channel_destruct(ioat);
		return -1;
	}

	return 0;
}
/*
 * Attach the driver to an I/OAT PCI device: enable bus mastering, bring
 * up the DMA channel, and place it on the free-channel list so a thread
 * can claim it with ioat_register_thread().
 *
 * Returns the new channel, or NULL on failure.
 */
struct ioat_channel *
ioat_attach(void *device)
{
	struct ioat_driver *driver = &g_ioat_driver;
	struct ioat_channel *ioat;
	uint32_t cmd_reg;

	/* Bug fix: the channel was malloc'd and never zeroed, so cleanup and
	 * error paths could observe indeterminate ring/comp_update/regs
	 * pointers. calloc guarantees all fields start out NULL/0. */
	ioat = calloc(1, sizeof(struct ioat_channel));
	if (ioat == NULL) {
		return NULL;
	}

	/* Enable PCI busmaster: set bit 2 of the PCI command register (offset 4). */
	ioat_pcicfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	ioat_pcicfg_write32(device, cmd_reg, 4);

	ioat->device = device;

	if (ioat_channel_start(ioat) != 0) {
		free(ioat);
		return NULL;
	}

	ioat_mutex_lock(&driver->lock);
	SLIST_INSERT_HEAD(&ioat_free_channels, ioat, next);
	ioat_mutex_unlock(&driver->lock);

	return ioat;
}
/* Detach a channel from the driver and release all of its resources.
 * The channel must be on the free list (not claimed by any thread). */
int
ioat_detach(struct ioat_channel *ioat)
{
	struct ioat_driver *drv = &g_ioat_driver;

	ioat_mutex_lock(&drv->lock);
	SLIST_REMOVE(&ioat_free_channels, ioat, ioat_channel, next);
	ioat_mutex_unlock(&drv->lock);

	ioat_channel_destruct(ioat);
	free(ioat);

	return 0;
}
int
ioat_register_thread(void)
{
struct ioat_driver *driver = &g_ioat_driver;
if (ioat_thread_channel) {
ioat_printf(NULL, "%s: thread already registered\n", __func__);
return -1;
}
ioat_mutex_lock(&driver->lock);
ioat_thread_channel = SLIST_FIRST(&ioat_free_channels);
if (ioat_thread_channel) {
SLIST_REMOVE_HEAD(&ioat_free_channels, next);
}
ioat_mutex_unlock(&driver->lock);
return ioat_thread_channel ? 0 : -1;
}
/* Return the calling thread's channel (if any) to the free list so
 * another thread can claim it. */
void
ioat_unregister_thread(void)
{
	struct ioat_driver *drv = &g_ioat_driver;

	if (ioat_thread_channel == NULL) {
		return;
	}

	ioat_mutex_lock(&drv->lock);
	SLIST_INSERT_HEAD(&ioat_free_channels, ioat_thread_channel, next);
	ioat_thread_channel = NULL;
	ioat_mutex_unlock(&drv->lock);
}
#define min(a, b) (((a)<(b))?(a):(b))
#define _2MB_PAGE(ptr) ((ptr) & ~(0x200000 - 1))
#define _2MB_OFFSET(ptr) ((ptr) & (0x200000 - 1))
/*
 * Submit a memory copy, splitting it into one descriptor per 2 MB
 * (hugepage) boundary crossing on either buffer, since physical
 * translation is only valid within one 2 MB page. The completion
 * callback is attached to the final descriptor only.
 *
 * Returns nbytes on success, or -1 if no channel is registered for this
 * thread or the ring filled up (in which case head is rewound and the
 * doorbell is never written, so nothing is submitted).
 */
int64_t
ioat_submit_copy(void *cb_arg, ioat_callback_t cb_fn,
		 void *dst, const void *src, uint64_t nbytes)
{
	struct ioat_channel *ioat;
	struct ioat_descriptor *last_desc;
	uint64_t remaining, op_size;
	uint64_t vdst, vsrc;
	uint64_t vdst_page, vsrc_page;
	uint64_t pdst_page, psrc_page;
	uint32_t orig_head;

	ioat = ioat_thread_channel;
	if (!ioat) {
		return -1;
	}

	/* Remember head so we can rewind if the ring fills up mid-request. */
	orig_head = ioat->head;

	vdst = (uint64_t)dst;
	vsrc = (uint64_t)src;
	vsrc_page = _2MB_PAGE(vsrc);
	vdst_page = _2MB_PAGE(vdst);
	psrc_page = ioat_vtophys((void *)vsrc_page);
	pdst_page = ioat_vtophys((void *)vdst_page);

	remaining = nbytes;

	while (remaining) {
		/* Chunk size: bounded by the distance to the next 2 MB page
		 * boundary on src and dst, and by the hardware max. */
		op_size = remaining;
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
		op_size = min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
		op_size = min(op_size, ioat->max_xfer_size);
		remaining -= op_size;

		last_desc = ioat_prep_copy(ioat,
					   pdst_page + _2MB_OFFSET(vdst),
					   psrc_page + _2MB_OFFSET(vsrc),
					   op_size);

		if (remaining == 0 || last_desc == NULL) {
			break;
		}

		vsrc += op_size;
		vdst += op_size;

		/* Re-translate only when a buffer crosses into a new 2 MB page. */
		if (_2MB_PAGE(vsrc) != vsrc_page) {
			vsrc_page = _2MB_PAGE(vsrc);
			psrc_page = ioat_vtophys((void *)vsrc_page);
		}
		if (_2MB_PAGE(vdst) != vdst_page) {
			vdst_page = _2MB_PAGE(vdst);
			pdst_page = ioat_vtophys((void *)vdst_page);
		}
	}

	/* Issue null descriptor for null transfer */
	if (nbytes == 0) {
		last_desc = ioat_prep_null(ioat);
	}

	if (last_desc) {
		last_desc->callback_fn = cb_fn;
		last_desc->callback_arg = cb_arg;
	} else {
		/*
		 * Ran out of descriptors in the ring - reset head to leave things as they were
		 * in case we managed to fill out any descriptors.
		 */
		ioat->head = orig_head;
		return -1;
	}

	ioat_flush(ioat);
	return nbytes;
}
void ioat_process_events(void)
{
if (!ioat_thread_channel) {
return;
}
ioat_process_channel_events(ioat_thread_channel);
}

93
lib/ioat/ioat_impl.h Normal file
View File

@ -0,0 +1,93 @@
#ifndef __IOAT_IMPL_H__
#define __IOAT_IMPL_H__
#include <assert.h>
#include <pthread.h>
#include <pciaccess.h>
#include <stdio.h>
#include <rte_malloc.h>
#include <rte_config.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include "spdk/vtophys.h"
/**
* \file
*
* This file describes the functions required to integrate
* the userspace IOAT driver for a specific implementation. This
* implementation is specific for DPDK. Users would revise it as
* necessary for their own particular environment if not using it
* within the SPDK framework.
*/
/**
* Allocate a pinned, physically contiguous memory buffer with the
* given size and alignment.
*/
/**
 * Allocate a pinned, physically contiguous, zeroed buffer with the given
 * size and alignment, returning its physical address through *phys_addr.
 * Returns NULL on allocation failure (and leaves *phys_addr untouched).
 */
static inline void *
ioat_zmalloc(const char *tag, size_t size, unsigned align, uint64_t *phys_addr)
{
	void *buf = rte_zmalloc(tag, size, align);

	/* Bug fix: don't translate a NULL pointer or store a bogus physical
	 * address when the allocation fails. */
	if (buf == NULL) {
		return NULL;
	}

	*phys_addr = rte_malloc_virt2phy(buf);
	return buf;
}
/**
* Free a memory buffer previously allocated with ioat_zmalloc.
*/
#define ioat_free(buf) rte_free(buf)
/**
* Return the physical address for the specified virtual address.
*/
#define ioat_vtophys(buf) vtophys(buf)
/**
* Delay us.
*/
#define ioat_delay_us(us) rte_delay_us(us)
/**
* Assert a condition and panic/abort as desired. Failures of these
* assertions indicate catastrophic failures within the driver.
*/
#define ioat_assert(check) assert(check)
/**
* Log or print a message from the driver.
*/
#define ioat_printf(chan, fmt, args...) printf(fmt, ##args)
/**
*
*/
#define ioat_pcicfg_read32(handle, var, offset) pci_device_cfg_read_u32(handle, var, offset)
#define ioat_pcicfg_write32(handle, var, offset) pci_device_cfg_write_u32(handle, var, offset)
/* Map one PCI BAR into the process address space via libpciaccess.
 * NOTE(review): only the first 4096 bytes of the BAR are mapped; that
 * covers the I/OAT register block, but confirm against
 * dev->regions[bar].size if a larger mapping is ever needed. */
static inline int
ioat_pcicfg_map_bar(void *devhandle, uint32_t bar, uint32_t read_only, void **mapped_addr)
{
	struct pci_device *dev = devhandle;
	uint32_t flags = (read_only ? 0 : PCI_DEV_MAP_FLAG_WRITABLE);

	return pci_device_map_range(dev, dev->regions[bar].base_addr, 4096,
				    flags, mapped_addr);
}
/* Unmap a BAR previously mapped with ioat_pcicfg_map_bar().
 * NOTE(review): unmaps regions[bar].size bytes even though only 4096 were
 * mapped -- presumably harmless with libpciaccess, but worth confirming. */
static inline int
ioat_pcicfg_unmap_bar(void *devhandle, uint32_t bar, void *addr)
{
	struct pci_device *dev = devhandle;

	return pci_device_unmap_range(dev, addr, dev->regions[bar].size);
}
typedef pthread_mutex_t ioat_mutex_t;
#define ioat_mutex_lock pthread_mutex_lock
#define ioat_mutex_unlock pthread_mutex_unlock
#define IOAT_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#endif /* __IOAT_IMPL_H__ */

116
lib/ioat/ioat_internal.h Normal file
View File

@ -0,0 +1,116 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IOAT_INTERNAL_H__
#define __IOAT_INTERNAL_H__
#include "spdk/ioat.h"
#include "spdk/ioat_spec.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "spdk/queue.h"
/* Allocate 1 << 15 (32K) descriptors per channel by default.
 * (1 << 15 == 32768; the ring size is presumably 1 << order — confirm in ioat.c.)
 */
#define IOAT_DEFAULT_ORDER 15
#ifdef __x86_64__
#define IOAT_64BIT_IO 1 /* Can do atomic 64-bit memory read/write (over PCIe) */
#else
#define IOAT_64BIT_IO 0
#endif
/*
 * Per-slot software bookkeeping for one hardware descriptor:
 * the completion callback plus views of the HW descriptor memory.
 */
struct ioat_descriptor {
	ioat_callback_t callback_fn; /* invoked with callback_arg on completion */
	void *callback_arg;
	/* Typed views of the same hardware descriptor, one per operation kind. */
	union {
		struct ioat_dma_hw_descriptor *dma;
		struct ioat_fill_hw_descriptor *fill;
		struct ioat_xor_hw_descriptor *xor;
		struct ioat_xor_ext_hw_descriptor *xor_ext;
		struct ioat_pq_hw_descriptor *pq;
		struct ioat_pq_ext_hw_descriptor *pq_ext;
		struct ioat_raw_hw_descriptor *raw;
	} u;
	uint64_t hw_desc_bus_addr; /* bus/physical address of the HW descriptor */
};
/* One of these per allocated PCI device. */
struct ioat_channel {
	SLIST_ENTRY(ioat_channel) next;
	/* Opaque handle to upper layer */
	void *device;
	uint64_t max_xfer_size; /* presumably max bytes per DMA operation — confirm in ioat.c */
	volatile struct ioat_registers *regs; /* memory-mapped channel registers */
	volatile uint64_t *comp_update; /* completion status written back by hardware */
	uint32_t head; /* NOTE(review): looks like next slot to submit — confirm */
	uint32_t tail; /* NOTE(review): looks like oldest un-reaped slot — confirm */
	uint32_t ring_size_order; /* ring presumably has 1 << ring_size_order entries */
	uint64_t last_seen;
	struct ioat_descriptor **ring; /* array of per-slot descriptor bookkeeping */
};
/* True when the CHANSTS STATUS field decodes to ACTIVE. */
static inline uint32_t
is_ioat_active(uint64_t status)
{
	const uint64_t chansts_state = status & IOAT_CHANSTS_STATUS;

	return (chansts_state == IOAT_CHANSTS_ACTIVE) ? 1 : 0;
}
/* True when the CHANSTS STATUS field decodes to IDLE. */
static inline uint32_t
is_ioat_idle(uint64_t status)
{
	const uint64_t chansts_state = status & IOAT_CHANSTS_STATUS;

	return (chansts_state == IOAT_CHANSTS_IDLE) ? 1 : 0;
}
/* True when the CHANSTS STATUS field decodes to HALTED. */
static inline uint32_t
is_ioat_halted(uint64_t status)
{
	const uint64_t chansts_state = status & IOAT_CHANSTS_STATUS;

	return (chansts_state == IOAT_CHANSTS_HALTED) ? 1 : 0;
}
/* True when the CHANSTS STATUS field decodes to SUSPENDED. */
static inline uint32_t
is_ioat_suspended(uint64_t status)
{
	const uint64_t chansts_state = status & IOAT_CHANSTS_STATUS;

	return (chansts_state == IOAT_CHANSTS_SUSPENDED) ? 1 : 0;
}
#endif /* __IOAT_INTERNAL_H__ */

94
lib/ioat/ioat_pci.h Normal file
View File

@ -0,0 +1,94 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IOAT_PCI_H__
#define __IOAT_PCI_H__
/* I/OAT DMA channel PCI device IDs, grouped by platform generation. */

/* SNB: Sandy Bridge Xeon */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
#define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23
#define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24
#define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25
#define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f

/* IVB: Ivy Bridge Xeon */
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f

/* HSW: Haswell Xeon */
#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20
#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21
#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22
#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23
#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24
#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25
#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26
#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27
#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e
#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f

/* BWD: presumably an Atom SoC generation — TODO confirm codename */
#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50
#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51
#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53

/* BDXDE: Broadwell-DE (Xeon D) */
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0 0x6f50
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1 0x6f51
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52
#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53

/* BDX: Broadwell Xeon */
#define PCI_DEVICE_ID_INTEL_IOAT_BDX0 0x6f20
#define PCI_DEVICE_ID_INTEL_IOAT_BDX1 0x6f21
#define PCI_DEVICE_ID_INTEL_IOAT_BDX2 0x6f22
#define PCI_DEVICE_ID_INTEL_IOAT_BDX3 0x6f23
#define PCI_DEVICE_ID_INTEL_IOAT_BDX4 0x6f24
#define PCI_DEVICE_ID_INTEL_IOAT_BDX5 0x6f25
#define PCI_DEVICE_ID_INTEL_IOAT_BDX6 0x6f26
#define PCI_DEVICE_ID_INTEL_IOAT_BDX7 0x6f27
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f

#define PCI_VENDOR_ID_INTEL 0x8086
#endif /* __IOAT_PCI_H__ */

58
mk/ioat.unittest.mk Normal file
View File

@ -0,0 +1,58 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Shared makefile fragment for ioat unit test binaries.
# Consumers set TEST_FILE (and optionally OTHER_FILES) before including this.
SPDK_ROOT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/..
IOAT_DIR := $(SPDK_ROOT_DIR)/lib/ioat
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
C_SRCS = $(TEST_FILE) $(OTHER_FILES)
# Force-include the stubbed ioat_impl.h so the driver builds without DPDK.
CFLAGS += -I$(SPDK_ROOT_DIR)/lib -include $(SPDK_ROOT_DIR)/test/lib/ioat/unit/ioat_impl.h
LIBS += -lcunit -lpthread
# The test binary is named after its source file (foo_ut.c -> foo_ut).
APP = $(TEST_FILE:.c=)
all: $(APP)
$(APP) : $(OBJS)
	$(LINK_C)
clean:
	$(CLEAN_C) $(APP)
# Allow driver sources under lib/ioat to be compiled directly into the test.
%.o: $(IOAT_DIR)/%.c %.d $(MAKEFILE_LIST)
	$(COMPILE_C)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

View File

@ -4,14 +4,15 @@ set -e
function configure_linux { function configure_linux {
rmmod nvme || true rmmod nvme || true
rmmod ioatdma || true
} }
function configure_freebsd { function configure_freebsd {
TMP=`mktemp` TMP=`mktemp`
AWK_PROG="{if (count > 0) printf \",\"; printf \"%s:%s:%s\",\$2,\$3,\$4; count++}" AWK_PROG="{if (count > 0) printf \",\"; printf \"%s:%s:%s\",\$2,\$3,\$4; count++}"
echo $AWK_PROG > $TMP echo $AWK_PROG > $TMP
NVME_PCICONF=`pciconf -l | grep class=0x010802` PCICONF=`pciconf -l | grep 'class=0x010802\|^ioat'`
BDFS=`echo $NVME_PCICONF | awk -F: -f $TMP` BDFS=`echo $PCICONF | awk -F: -f $TMP`
kldunload nic_uio.ko || true kldunload nic_uio.ko || true
kenv hw.nic_uio.bdfs=$BDFS kenv hw.nic_uio.bdfs=$BDFS
kldload nic_uio.ko kldload nic_uio.ko

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(CURDIR)/../.. SPDK_ROOT_DIR := $(CURDIR)/../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = nvme memory DIRS-y = nvme memory ioat
.PHONY: all clean $(DIRS-y) .PHONY: all clean $(DIRS-y)

44
test/lib/ioat/Makefile Normal file
View File

@ -0,0 +1,44 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Dispatch makefile: descend into each ioat test subdirectory.
SPDK_ROOT_DIR := $(CURDIR)/../../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = unit
.PHONY: all clean $(DIRS-y)
all: $(DIRS-y)
clean: $(DIRS-y)
# spdk.subdirs.mk supplies the per-directory recursion rules.
include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk

23
test/lib/ioat/ioat.sh Executable file
View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Run the ioat test set: CUnit unit tests, then the perf and verify example
# apps (which exercise the driver against real I/OAT hardware).
set -xe
testdir=$(readlink -f $(dirname $0))
rootdir="$testdir/../../.."
source $rootdir/scripts/autotest_common.sh
timing_enter ioat
# Unit tests use the stubbed ioat_impl.h and need no hardware.
timing_enter unit
$testdir/unit/ioat_ut
timing_exit unit
timing_enter perf
$rootdir/examples/ioat/perf/perf
timing_exit perf
timing_enter verify
$rootdir/examples/ioat/verify/verify
timing_exit verify
timing_exit ioat

1
test/lib/ioat/unit/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
ioat_ut

View File

@ -0,0 +1,39 @@
#
# BSD LICENSE
#
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Build the ioat unit test binary via the shared unittest fragment.
SPDK_ROOT_DIR := $(CURDIR)/../../../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
TEST_FILE = ioat_ut.c
include $(SPDK_ROOT_DIR)/mk/ioat.unittest.mk

View File

@ -0,0 +1,50 @@
/* Unit test stubbed version of ioat_impl.h */
#ifndef __IOAT_IMPL_H__
#define __IOAT_IMPL_H__
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
/**
 * Stubbed allocator for unit tests: plain zeroed heap memory, with the
 * virtual address standing in for the physical address (matching the
 * ioat_vtophys() stub below).
 */
static inline void *
ioat_zmalloc(const char *tag, size_t size, unsigned align, uint64_t *phys_addr)
{
	void *buf = calloc(1, size);

	(void)tag;
	(void)align;
	/* The original stub left *phys_addr uninitialized; the driver stores
	 * this value into hardware descriptors, so it must be well-defined.
	 */
	*phys_addr = (uint64_t)(uintptr_t)buf;
	return buf;
}
#define ioat_noop() do { } while (0)
#define ioat_calloc(tag, num, size, align) calloc(num, size)
#define ioat_malloc(tag, size, align) malloc(size)
#define ioat_free(buf) free(buf)
#define ioat_vtophys(buf) (uint64_t)(buf)
#define ioat_delay_us(us) ioat_noop()
#define ioat_assert(check) assert(check)
#define ioat_printf(chan, fmt, args...) printf(fmt, ##args)
#define ioat_pcicfg_read32(handle, var, offset) do { *(var) = 0xFFFFFFFFu; } while (0)
#define ioat_pcicfg_write32(handle, var, offset) do { (void)(var); } while (0)
/* BAR mapping is not modeled in the unit test environment: always fail
 * and hand back a NULL mapping.
 */
static inline int
ioat_pcicfg_map_bar(void *devhandle, uint32_t bar, uint32_t read_only, void **mapped_addr)
{
	(void)devhandle;
	(void)bar;
	(void)read_only;

	*mapped_addr = NULL;
	return -1;
}
/* Nothing was ever mapped, so unmapping trivially succeeds. */
static inline int
ioat_pcicfg_unmap_bar(void *devhandle, uint32_t bar, void *addr)
{
	(void)devhandle;
	(void)bar;
	(void)addr;

	return 0;
}
/* Mutexes map straight onto pthreads in the unit test environment. */
typedef pthread_mutex_t ioat_mutex_t;
#define ioat_mutex_lock pthread_mutex_lock
#define ioat_mutex_unlock pthread_mutex_unlock
#define IOAT_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#endif /* __IOAT_IMPL_H__ */

View File

@ -0,0 +1,112 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CUnit/Basic.h"
#include "ioat/ioat.c"
/* Exhaustively verify the CHANSTS STATUS decode helpers (is_ioat_*) over
 * all 8 possible values of the 3-bit field: each helper must accept exactly
 * its own state and reject every other value, including reserved ones.
 */
static void ioat_state_check(void)
{
	/*
	 * CHANSTS's STATUS field is 3 bits (8 possible values), but only has 5 valid states:
	 * ACTIVE	0x0
	 * IDLE		0x1
	 * SUSPENDED	0x2
	 * HALTED	0x3
	 * ARMED	0x4
	 */
	CU_ASSERT(is_ioat_active(0) == 1); /* ACTIVE */
	CU_ASSERT(is_ioat_active(1) == 0); /* IDLE */
	CU_ASSERT(is_ioat_active(2) == 0); /* SUSPENDED */
	CU_ASSERT(is_ioat_active(3) == 0); /* HALTED */
	CU_ASSERT(is_ioat_active(4) == 0); /* ARMED */
	CU_ASSERT(is_ioat_active(5) == 0); /* reserved */
	CU_ASSERT(is_ioat_active(6) == 0); /* reserved */
	CU_ASSERT(is_ioat_active(7) == 0); /* reserved */
	CU_ASSERT(is_ioat_idle(0) == 0); /* ACTIVE */
	CU_ASSERT(is_ioat_idle(1) == 1); /* IDLE */
	CU_ASSERT(is_ioat_idle(2) == 0); /* SUSPENDED */
	CU_ASSERT(is_ioat_idle(3) == 0); /* HALTED */
	CU_ASSERT(is_ioat_idle(4) == 0); /* ARMED */
	CU_ASSERT(is_ioat_idle(5) == 0); /* reserved */
	CU_ASSERT(is_ioat_idle(6) == 0); /* reserved */
	CU_ASSERT(is_ioat_idle(7) == 0); /* reserved */
	CU_ASSERT(is_ioat_suspended(0) == 0); /* ACTIVE */
	CU_ASSERT(is_ioat_suspended(1) == 0); /* IDLE */
	CU_ASSERT(is_ioat_suspended(2) == 1); /* SUSPENDED */
	CU_ASSERT(is_ioat_suspended(3) == 0); /* HALTED */
	CU_ASSERT(is_ioat_suspended(4) == 0); /* ARMED */
	CU_ASSERT(is_ioat_suspended(5) == 0); /* reserved */
	CU_ASSERT(is_ioat_suspended(6) == 0); /* reserved */
	CU_ASSERT(is_ioat_suspended(7) == 0); /* reserved */
	CU_ASSERT(is_ioat_halted(0) == 0); /* ACTIVE */
	CU_ASSERT(is_ioat_halted(1) == 0); /* IDLE */
	CU_ASSERT(is_ioat_halted(2) == 0); /* SUSPENDED */
	CU_ASSERT(is_ioat_halted(3) == 1); /* HALTED */
	CU_ASSERT(is_ioat_halted(4) == 0); /* ARMED */
	CU_ASSERT(is_ioat_halted(5) == 0); /* reserved */
	CU_ASSERT(is_ioat_halted(6) == 0); /* reserved */
	CU_ASSERT(is_ioat_halted(7) == 0); /* reserved */
}
/* CUnit test runner: register the ioat suite and return the failure count. */
int main(int argc, char **argv)
{
	CU_pSuite suite;
	unsigned int failures;

	(void)argc;
	(void)argv;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("ioat", NULL, NULL);
	if (suite == NULL ||
	    CU_add_test(suite, "ioat_state_check", ioat_state_check) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	/* Non-zero exit when any assertion failed. */
	return failures;
}

View File

@ -9,3 +9,7 @@ test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut
test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut
test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut
test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut
make -C test/lib/ioat/unit
test/lib/ioat/unit/ioat_ut