bdev: xNVMe BDEV module implementation

This implementation of the xNVMe BDEV module supports the char-device / ioctl-over-io_uring path,
along with the "regular" io_uring, libaio, POSIX aio, emulated aio (via threadpools), etc.

Code changes:
a. Addition of xNVMe submodule to SPDK
b. Modification of RPC scripts to Create / Delete xNVMe BDEVs
c. Implementation of xNVMe BDEV module

Signed-off-by: Krishna Kanth Reddy <krish.reddy@samsung.com>
Change-Id: If814ca1c784124df429d283015a6570068b44f87
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11161
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Authored by Krishna Kanth Reddy on 2022-01-19 15:42:50 +05:30, committed by Tomasz Zawadzki
parent 8f2ca281ec
commit 6f338d4bf3
19 changed files with 826 additions and 1 deletion

.gitmodules

@ -13,3 +13,6 @@
[submodule "libvfio-user"]
path = libvfio-user
url = https://github.com/nutanix/libvfio-user.git
[submodule "xnvme"]
path = xnvme
url = https://github.com/OpenMPDK/xNVMe.git


@ -2,6 +2,10 @@
## v22.09: (Upcoming Release)
### bdev
New RPCs `bdev_xnvme_create` and `bdev_xnvme_delete` were added to support the xNVMe bdev.
### sock
Added new `ssl` based socket implementation, the code is located in module/sock/posix.

CONFIG

@ -118,6 +118,9 @@ CONFIG_VFIO_USER_DIR=
CONFIG_PMDK=n
CONFIG_PMDK_DIR=
# Build with xNVMe
CONFIG_XNVME=n
# Enable the dependencies for building the compress vbdev
CONFIG_REDUCE=n


@ -20,6 +20,7 @@ DIRS-$(CONFIG_IPSEC_MB) += ipsecbuild
DIRS-$(CONFIG_ISAL) += isalbuild
DIRS-$(CONFIG_VFIO_USER) += vfiouserbuild
DIRS-y += python
DIRS-$(CONFIG_XNVME) += xnvmebuild
.PHONY: all clean $(DIRS-y) include/spdk/config.h mk/config.mk \
cc_version cxx_version .libs_only_other .ldflags ldflags install \
@ -67,6 +68,11 @@ VFIOUSERBUILD = vfiouserbuild
LIB += vfiouserbuild
endif
ifeq ($(CONFIG_XNVME),y)
XNVMEBUILD = xnvmebuild
LIB += xnvmebuild
endif
all: mk/cc.mk $(DIRS-y)
clean: $(DIRS-y)
$(Q)rm -f include/spdk/config.h
@ -83,7 +89,7 @@ dpdkdeps $(DPDK_DEPS): $(WPDK)
dpdkbuild: $(WPDK) $(DPDK_DEPS)
endif
lib: $(WPDK) $(DPDKBUILD) $(VFIOUSERBUILD)
lib: $(WPDK) $(DPDKBUILD) $(VFIOUSERBUILD) $(XNVMEBUILD)
module: lib
shared_lib: module
app: $(LIB)

configure

@ -56,6 +56,8 @@ function usage() {
echo " --without-crypto No path required."
echo " --with-fio[=DIR] Build fio_plugin."
echo " --without-fio default: /usr/src/fio"
echo " --with-xnvme Build xNVMe bdev module."
echo " --without-xnvme No path required."
echo " --with-vhost Build vhost target. Enabled by default."
echo " --without-vhost No path required."
echo " --with-virtio Build vhost initiator and virtio-pci bdev modules."
@ -541,6 +543,12 @@ for i in "$@"; do
--without-reduce)
CONFIG[REDUCE]=n
;;
--with-xnvme)
CONFIG[XNVME]=y
;;
--without-xnvme)
CONFIG[XNVME]=n
;;
--with-fio) ;&
--with-fio=*)
if [[ ${i#*=} != "$i" ]]; then


@ -588,6 +588,26 @@ To remove a uring bdev use the `bdev_uring_delete` RPC.
`rpc.py bdev_uring_delete bdev_u0`
## xnvme {#bdev_ug_xnvme}
The xnvme bdev module issues I/O to the underlying NVMe devices through various I/O mechanisms
such as libaio, io_uring, asynchronous IOCTLs using io_uring passthrough, POSIX aio, emulated aio, etc.
This module requires the xNVMe library.
For more information on xNVMe refer to [xNVMe](https://xnvme.io/docs/latest).
The user needs to configure SPDK to include xNVMe support:
`configure --with-xnvme`
To create an xnvme bdev with a given filename, bdev name and I/O mechanism, use the `bdev_xnvme_create` RPC.
`rpc.py bdev_xnvme_create /dev/ng0n1 bdev_ng0n1 io_uring_cmd`
To remove an xnvme bdev, use the `bdev_xnvme_delete` RPC.
`rpc.py bdev_xnvme_delete bdev_ng0n1`
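The same create/delete flow can also be driven programmatically through the SPDK Python RPC package. The snippet below is an illustrative sketch, not part of this change: it assumes the target application listens on the default `/var/tmp/spdk.sock` RPC socket and that the repository's `python/` directory is on `PYTHONPATH`.
~~~python
# Sketch: create and delete an xnvme bdev over JSON-RPC, using the
# wrappers added in python/spdk/rpc/bdev.py by this commit.
from spdk.rpc.client import JSONRPCClient
import spdk.rpc.bdev as bdev

client = JSONRPCClient('/var/tmp/spdk.sock')  # default SPDK RPC listen address

# Create an xnvme bdev on the NVMe char device using io_uring command passthrough.
name = bdev.bdev_xnvme_create(client, filename='/dev/ng0n1',
                              name='bdev_ng0n1', io_mechanism='io_uring_cmd')
print(name)  # -> "bdev_ng0n1"

# Remove the bdev again.
bdev.bdev_xnvme_delete(client, name='bdev_ng0n1')
~~~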
## Virtio Block {#bdev_config_virtio_blk}
The Virtio-Block driver allows creating SPDK bdevs from Virtio-Block devices.


@ -4934,6 +4934,85 @@ Example response:
}
~~~
### bdev_xnvme_create {#rpc_bdev_xnvme_create}
Create an xnvme bdev. This bdev type redirects all I/O to its underlying xNVMe backend.
#### Parameters
Name | Optional | Type | Description
----------------------- | -------- | ----------- | -----------
name | Required | string | name of xNVMe bdev to create
filename | Required | string | path to device or file (ex: /dev/nvme0n1)
io_mechanism | Required | string | IO mechanism to use (ex: libaio, io_uring, io_uring_cmd, etc.)
#### Result
Name of newly created bdev.
#### Example
Example request:
~~~json
{
"jsonrpc": "2.0",
"method": "bdev_xnvme_create",
"id": 1,
"params": {
"name": "bdev_ng0n1",
"filename": "/dev/ng0n1",
"io_mechanism": "io_uring_cmd"
}
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": "bdev_ng0n1"
}
~~~
### bdev_xnvme_delete {#rpc_bdev_xnvme_delete}
Delete an xnvme bdev.
#### Parameters
Name | Optional | Type | Description
----------------------- | -------- | ----------- | -----------
name | Required | string | name of xnvme bdev to delete
#### Example
Example request:
~~~json
{
"params": {
"name": "bdev_ng0n1"
},
"jsonrpc": "2.0",
"method": "bdev_xnvme_delete",
"id": 1
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": true
}
~~~
### bdev_virtio_attach_controller {#rpc_bdev_virtio_attach_controller}
Create new initiator @ref bdev_config_virtio_scsi or @ref bdev_config_virtio_blk and expose all found bdevs.


@ -196,6 +196,16 @@ LDFLAGS += -L$(VFIO_USER_LIBRARY_DIR)
SYS_LIBS += -lvfio-user -ljson-c
endif
ifeq ($(CONFIG_XNVME), y)
XNVME_DIR=$(SPDK_ROOT_DIR)/xnvme
XNVME_INSTALL_DIR=$(XNVME_DIR)/builddir/lib
XNVME_INCLUDE_DIR=$(XNVME_DIR)/include
CFLAGS += -I$(XNVME_INCLUDE_DIR)
LDFLAGS += -L$(XNVME_INSTALL_DIR)
SYS_LIBS += -lxnvme
endif
#Attach only if FreeBSD and RDMA is specified with configure
ifeq ($(OS),FreeBSD)
ifeq ($(CONFIG_RDMA),y)


@ -14,6 +14,10 @@ INTR_BLOCKDEV_MODULES_LIST = bdev_malloc bdev_passthru bdev_error bdev_gpt bdev_
# Logical volume, blobstore and blobfs can directly run in both interrupt mode and poll mode.
INTR_BLOCKDEV_MODULES_LIST += bdev_lvol blobfs blobfs_bdev blob_bdev blob lvol
ifeq ($(CONFIG_XNVME),y)
BLOCKDEV_MODULES_LIST += bdev_xnvme
endif
ifeq ($(CONFIG_VFIO_USER),y)
BLOCKDEV_MODULES_LIST += vfio_user
endif


@ -8,6 +8,8 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y += delay error gpt lvol malloc null nvme passthru raid split zone_block
DIRS-$(CONFIG_XNVME) += xnvme
DIRS-$(CONFIG_CRYPTO) += crypto
DIRS-$(CONFIG_OCF) += ocf


@ -0,0 +1,19 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Samsung Electronics Co., Ltd.
# All rights reserved.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 1
SO_MINOR := 0
C_SRCS = bdev_xnvme.c bdev_xnvme_rpc.c
LIBNAME = bdev_xnvme
CFLAGS += -I$(SPDK_ROOT_DIR)/xnvme/include
SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk


@ -0,0 +1,434 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Samsung Electronics Co., Ltd.
* All rights reserved.
*/
#include "libxnvme.h"
#include "libxnvme_pp.h"
#include "bdev_xnvme.h"
#include "spdk/stdinc.h"
#include "spdk/barrier.h"
#include "spdk/bdev.h"
#include "spdk/env.h"
#include "spdk/fd.h"
#include "spdk/likely.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/log.h"
struct bdev_xnvme_io_channel {
struct xnvme_queue *queue;
struct spdk_poller *poller;
};
struct bdev_xnvme_task {
struct bdev_xnvme_io_channel *ch;
TAILQ_ENTRY(bdev_xnvme_task) link;
};
struct bdev_xnvme {
struct spdk_bdev bdev;
char *filename;
struct xnvme_dev *dev;
uint32_t nsid;
TAILQ_ENTRY(bdev_xnvme) link;
};
static int bdev_xnvme_init(void);
static void bdev_xnvme_fini(void);
static void bdev_xnvme_free(struct bdev_xnvme *xnvme);
static TAILQ_HEAD(, bdev_xnvme) g_xnvme_bdev_head = TAILQ_HEAD_INITIALIZER(g_xnvme_bdev_head);
static int
bdev_xnvme_get_ctx_size(void)
{
return sizeof(struct bdev_xnvme_task);
}
static struct spdk_bdev_module xnvme_if = {
.name = "xnvme",
.module_init = bdev_xnvme_init,
.module_fini = bdev_xnvme_fini,
.get_ctx_size = bdev_xnvme_get_ctx_size,
};
SPDK_BDEV_MODULE_REGISTER(xnvme, &xnvme_if)
static struct spdk_io_channel *
bdev_xnvme_get_io_channel(void *ctx)
{
struct bdev_xnvme *xnvme = ctx;
return spdk_get_io_channel(xnvme);
}
static bool
bdev_xnvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
switch (io_type) {
case SPDK_BDEV_IO_TYPE_READ:
case SPDK_BDEV_IO_TYPE_WRITE:
return true;
default:
return false;
}
}
static int
bdev_xnvme_destruct(void *ctx)
{
struct bdev_xnvme *xnvme = ctx;
int rc = 0;
TAILQ_REMOVE(&g_xnvme_bdev_head, xnvme, link);
spdk_io_device_unregister(xnvme, NULL);
bdev_xnvme_free(xnvme);
return rc;
}
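/* Buffer-ready callback: build the NVMe read/write command for this bdev's
 * namespace and submit it on the channel's xNVMe queue via xnvme_cmd_passv(). */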
static void
bdev_xnvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
struct bdev_xnvme_task *xnvme_task = (struct bdev_xnvme_task *)bdev_io->driver_ctx;
struct bdev_xnvme *xnvme = (struct bdev_xnvme *)bdev_io->bdev->ctxt;
struct bdev_xnvme_io_channel *xnvme_ch = spdk_io_channel_get_ctx(ch);
struct xnvme_cmd_ctx *ctx = xnvme_queue_get_cmd_ctx(xnvme_ch->queue);
int err;
if (!success) {
xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
return;
}
SPDK_DEBUGLOG(xnvme, "bdev_io : %p, iov_cnt : %d, bdev_xnvme_task : %p\n",
bdev_io, bdev_io->u.bdev.iovcnt, (struct bdev_xnvme_task *)bdev_io->driver_ctx);
switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ:
ctx->cmd.common.opcode = XNVME_SPEC_NVM_OPC_READ;
ctx->cmd.common.nsid = xnvme->nsid;
ctx->cmd.nvm.nlb = bdev_io->u.bdev.num_blocks - 1;
ctx->cmd.nvm.slba = bdev_io->u.bdev.offset_blocks;
break;
case SPDK_BDEV_IO_TYPE_WRITE:
ctx->cmd.common.opcode = XNVME_SPEC_NVM_OPC_WRITE;
ctx->cmd.common.nsid = xnvme->nsid;
ctx->cmd.nvm.nlb = bdev_io->u.bdev.num_blocks - 1;
ctx->cmd.nvm.slba = bdev_io->u.bdev.offset_blocks;
break;
default:
SPDK_ERRLOG("Wrong io type\n");
xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
return;
}
xnvme_task->ch = xnvme_ch;
ctx->async.cb_arg = xnvme_task;
err = xnvme_cmd_passv(ctx, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.num_blocks * xnvme->bdev.blocklen, NULL, 0, 0);
switch (err) {
/* Submission success! */
case 0:
SPDK_DEBUGLOG(xnvme, "io_channel : %p, iovcnt:%d, nblks: %lu off: %#lx\n",
xnvme_ch, bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.offset_blocks);
return;
/* Submission failed: queue is full or no memory => Queue the I/O in bdev layer */
case -EBUSY:
case -EAGAIN:
case -ENOMEM:
SPDK_WARNLOG("Start to queue I/O for xnvme bdev\n");
xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
return;
/* Submission failed: unexpected error, put the command-context back in the queue */
default:
SPDK_ERRLOG("bdev_xnvme_cmd_passv : Submission failed: unexpected error\n");
xnvme_queue_put_cmd_ctx(xnvme_ch->queue, ctx);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
return;
}
}
static void
bdev_xnvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
switch (bdev_io->type) {
/* Read and write operations must be performed on buffers aligned to
* bdev->required_alignment. If user specified unaligned buffers,
* get the aligned buffer from the pool by calling spdk_bdev_io_get_buf. */
case SPDK_BDEV_IO_TYPE_READ:
case SPDK_BDEV_IO_TYPE_WRITE:
spdk_bdev_io_get_buf(bdev_io, bdev_xnvme_get_buf_cb,
bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
break;
default:
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
break;
}
}
static const struct spdk_bdev_fn_table xnvme_fn_table = {
.destruct = bdev_xnvme_destruct,
.submit_request = bdev_xnvme_submit_request,
.io_type_supported = bdev_xnvme_io_type_supported,
.get_io_channel = bdev_xnvme_get_io_channel,
};
static void
bdev_xnvme_free(struct bdev_xnvme *xnvme)
{
assert(xnvme != NULL);
free(xnvme->filename);
free(xnvme->bdev.name);
free(xnvme);
}
static void
bdev_xnvme_cmd_cb(struct xnvme_cmd_ctx *ctx, void *cb_arg)
{
struct bdev_xnvme_task *xnvme_task = ctx->async.cb_arg;
enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_SUCCESS;
SPDK_DEBUGLOG(xnvme, "xnvme_task : %p\n", xnvme_task);
if (xnvme_cmd_ctx_cpl_status(ctx)) {
SPDK_ERRLOG("xNVMe I/O Failed\n");
xnvme_cmd_ctx_pr(ctx, XNVME_PR_DEF);
status = SPDK_BDEV_IO_STATUS_FAILED;
}
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(xnvme_task), status);
/* Completed: put the command context back in the queue */
xnvme_queue_put_cmd_ctx(ctx->async.queue, ctx);
}
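/* Channel poller: poke the xNVMe queue to process completions (each one
 * invokes bdev_xnvme_cmd_cb above); stay busy while commands are outstanding. */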
static int
bdev_xnvme_poll(void *arg)
{
struct bdev_xnvme_io_channel *ch = arg;
int rc;
rc = xnvme_queue_poke(ch->queue, 0);
if (rc < 0) {
SPDK_ERRLOG("xnvme_queue_poke failure rc : %d\n", rc);
return SPDK_POLLER_BUSY;
}
return xnvme_queue_get_outstanding(ch->queue) ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static int
bdev_xnvme_queue_create_cb(void *io_device, void *ctx_buf)
{
struct bdev_xnvme *xnvme = io_device;
struct bdev_xnvme_io_channel *ch = ctx_buf;
int rc;
int qd = 512;
rc = xnvme_queue_init(xnvme->dev, qd, 0, &ch->queue);
if (rc) {
SPDK_ERRLOG("xnvme_queue_init failure: %d\n", rc);
return 1;
}
xnvme_queue_set_cb(ch->queue, bdev_xnvme_cmd_cb, ch);
ch->poller = SPDK_POLLER_REGISTER(bdev_xnvme_poll, ch, 0);
return 0;
}
static void
bdev_xnvme_queue_destroy_cb(void *io_device, void *ctx_buf)
{
struct bdev_xnvme_io_channel *ch = ctx_buf;
spdk_poller_unregister(&ch->poller);
xnvme_queue_term(ch->queue);
}
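/* Open the backing device through xNVMe with the requested I/O mechanism,
 * validate its geometry and register it as an SPDK bdev.
 * Invoked from the bdev_xnvme_create RPC. */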
struct spdk_bdev *
create_xnvme_bdev(const char *name, const char *filename, const char *io_mechanism)
{
struct bdev_xnvme *xnvme;
uint32_t block_size;
uint64_t bdev_size;
int rc;
struct xnvme_opts opts = xnvme_opts_default();
xnvme = calloc(1, sizeof(*xnvme));
if (!xnvme) {
SPDK_ERRLOG("Unable to allocate enough memory for xNVMe backend\n");
return NULL;
}
opts.async = io_mechanism;
if (!opts.async) {
goto error_return;
}
xnvme->filename = strdup(filename);
if (!xnvme->filename) {
goto error_return;
}
xnvme->dev = xnvme_dev_open(xnvme->filename, &opts);
if (!xnvme->dev) {
SPDK_ERRLOG("Unable to open xNVMe device %s\n", filename);
goto error_return;
}
xnvme->nsid = xnvme_dev_get_nsid(xnvme->dev);
bdev_size = xnvme_dev_get_geo(xnvme->dev)->tbytes;
block_size = xnvme_dev_get_geo(xnvme->dev)->nbytes;
xnvme->bdev.name = strdup(name);
if (!xnvme->bdev.name) {
goto error_return;
}
xnvme->bdev.product_name = "xNVMe bdev";
xnvme->bdev.module = &xnvme_if;
xnvme->bdev.write_cache = 0;
if (block_size == 0) {
SPDK_ERRLOG("Block size could not be auto-detected\n");
goto error_return;
}
if (block_size < 512) {
SPDK_ERRLOG("Invalid block size %" PRIu32 " (must be at least 512).\n", block_size);
goto error_return;
}
if (!spdk_u32_is_pow2(block_size)) {
SPDK_ERRLOG("Invalid block size %" PRIu32 " (must be a power of 2.)\n", block_size);
goto error_return;
}
SPDK_DEBUGLOG(xnvme, "bdev_name : %s, bdev_size : %lu, block_size : %d\n",
xnvme->bdev.name, bdev_size, block_size);
xnvme->bdev.blocklen = block_size;
xnvme->bdev.required_alignment = spdk_u32log2(block_size);
if (bdev_size % xnvme->bdev.blocklen != 0) {
SPDK_ERRLOG("Disk size %" PRIu64 " is not a multiple of block size %" PRIu32 "\n",
bdev_size, xnvme->bdev.blocklen);
goto error_return;
}
xnvme->bdev.blockcnt = bdev_size / xnvme->bdev.blocklen;
xnvme->bdev.ctxt = xnvme;
xnvme->bdev.fn_table = &xnvme_fn_table;
spdk_io_device_register(xnvme, bdev_xnvme_queue_create_cb, bdev_xnvme_queue_destroy_cb,
sizeof(struct bdev_xnvme_io_channel),
xnvme->bdev.name);
rc = spdk_bdev_register(&xnvme->bdev);
if (rc) {
spdk_io_device_unregister(xnvme, NULL);
goto error_return;
}
TAILQ_INSERT_TAIL(&g_xnvme_bdev_head, xnvme, link);
return &xnvme->bdev;
error_return:
xnvme_dev_close(xnvme->dev);
bdev_xnvme_free(xnvme);
return NULL;
}
struct delete_xnvme_bdev_ctx {
spdk_delete_xnvme_complete cb_fn;
void *cb_arg;
};
static void
xnvme_bdev_unregister_cb(void *arg, int bdeverrno)
{
struct delete_xnvme_bdev_ctx *ctx = arg;
ctx->cb_fn(ctx->cb_arg, bdeverrno);
free(ctx);
}
void
delete_xnvme_bdev(struct spdk_bdev *bdev, spdk_delete_xnvme_complete cb_fn, void *cb_arg)
{
struct delete_xnvme_bdev_ctx *ctx;
struct bdev_xnvme *xnvme;
if (!bdev || bdev->module != &xnvme_if) {
cb_fn(cb_arg, -ENODEV);
return;
}
/* Dereference bdev->ctxt only after the NULL/module check above. */
xnvme = (struct bdev_xnvme *)bdev->ctxt;
ctx = calloc(1, sizeof(*ctx));
if (ctx == NULL) {
cb_fn(cb_arg, -ENOMEM);
return;
}
ctx->cb_fn = cb_fn;
ctx->cb_arg = cb_arg;
spdk_bdev_unregister(bdev, xnvme_bdev_unregister_cb, ctx);
xnvme_dev_close(xnvme->dev);
}
static int
bdev_xnvme_module_create_cb(void *io_device, void *ctx_buf)
{
return 0;
}
static void
bdev_xnvme_module_destroy_cb(void *io_device, void *ctx_buf)
{
}
static int
bdev_xnvme_init(void)
{
spdk_io_device_register(&xnvme_if, bdev_xnvme_module_create_cb, bdev_xnvme_module_destroy_cb,
0, "xnvme_module");
return 0;
}
static void
bdev_xnvme_fini(void)
{
spdk_io_device_unregister(&xnvme_if, NULL);
}
SPDK_LOG_REGISTER_COMPONENT(xnvme)


@ -0,0 +1,23 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Samsung Electronics Co., Ltd.
* All rights reserved.
*/
#ifndef SPDK_BDEV_XNVME_H
#define SPDK_BDEV_XNVME_H
#include "spdk/stdinc.h"
#include "spdk/queue.h"
#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
typedef void (*spdk_delete_xnvme_complete)(void *cb_arg, int bdeverrno);
struct spdk_bdev *create_xnvme_bdev(const char *name, const char *filename,
const char *io_mechanism);
void delete_xnvme_bdev(struct spdk_bdev *bdev, spdk_delete_xnvme_complete cb_fn, void *cb_arg);
#endif /* SPDK_BDEV_XNVME_H */


@ -0,0 +1,134 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Samsung Electronics Co., Ltd.
* All rights reserved.
*/
#include "bdev_xnvme.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/log.h"
/* Structure to hold the parameters for this RPC method. */
struct rpc_create_xnvme {
char *name;
char *filename;
char *io_mechanism;
};
/* Free the allocated memory resource after the RPC handling. */
static void
free_rpc_create_xnvme(struct rpc_create_xnvme *r)
{
free(r->name);
free(r->filename);
free(r->io_mechanism);
}
/* Structure to decode the input parameters for this RPC method. */
static const struct spdk_json_object_decoder rpc_create_xnvme_decoders[] = {
{"name", offsetof(struct rpc_create_xnvme, name), spdk_json_decode_string},
{"filename", offsetof(struct rpc_create_xnvme, filename), spdk_json_decode_string},
{"io_mechanism", offsetof(struct rpc_create_xnvme, io_mechanism), spdk_json_decode_string},
};
static void
dummy_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
{
}
/* Decode the parameters for this RPC method and properly create the xnvme
* device. Error status returned in the failed cases.
*/
static void
rpc_bdev_xnvme_create(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_create_xnvme req = {};
struct spdk_json_write_ctx *w;
struct spdk_bdev *bdev;
if (spdk_json_decode_object(params, rpc_create_xnvme_decoders,
SPDK_COUNTOF(rpc_create_xnvme_decoders),
&req)) {
SPDK_ERRLOG("spdk_json_decode_object failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"spdk_json_decode_object failed");
goto cleanup;
}
bdev = create_xnvme_bdev(req.name, req.filename, req.io_mechanism);
if (!bdev) {
SPDK_ERRLOG("Unable to create xNVMe bdev from file %s\n", req.filename);
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"Unable to create xNVMe bdev.");
goto cleanup;
}
w = spdk_jsonrpc_begin_result(request);
spdk_json_write_string(w, req.name);
spdk_jsonrpc_end_result(request, w);
cleanup:
free_rpc_create_xnvme(&req);
}
SPDK_RPC_REGISTER("bdev_xnvme_create", rpc_bdev_xnvme_create, SPDK_RPC_RUNTIME)
struct rpc_delete_xnvme {
char *name;
};
static void
free_rpc_delete_xnvme(struct rpc_delete_xnvme *req)
{
free(req->name);
}
static const struct spdk_json_object_decoder rpc_delete_xnvme_decoders[] = {
{"name", offsetof(struct rpc_delete_xnvme, name), spdk_json_decode_string},
};
static void
_rpc_bdev_xnvme_delete_cb(void *cb_arg, int bdeverrno)
{
struct spdk_jsonrpc_request *request = cb_arg;
spdk_jsonrpc_send_bool_response(request, bdeverrno == 0);
}
static void
rpc_bdev_xnvme_delete(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_delete_xnvme req = {NULL};
struct spdk_bdev_desc *desc;
struct spdk_bdev *bdev = NULL;
int rc;
if (spdk_json_decode_object(params, rpc_delete_xnvme_decoders,
SPDK_COUNTOF(rpc_delete_xnvme_decoders),
&req)) {
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"spdk_json_decode_object failed");
goto cleanup;
}
rc = spdk_bdev_open_ext(req.name, false, dummy_bdev_event_cb, NULL, &desc);
if (rc == 0) {
bdev = spdk_bdev_desc_get_bdev(desc);
spdk_bdev_close(desc);
} else {
goto cleanup;
}
if (bdev == NULL) {
spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
goto cleanup;
}
delete_xnvme_bdev(bdev, _rpc_bdev_xnvme_delete_cb, request);
cleanup:
free_rpc_delete_xnvme(&req);
}
SPDK_RPC_REGISTER("bdev_xnvme_delete", rpc_bdev_xnvme_delete, SPDK_RPC_RUNTIME)


@ -440,6 +440,34 @@ def bdev_uring_delete(client, name):
return client.call('bdev_uring_delete', params)
def bdev_xnvme_create(client, filename, name, io_mechanism):
"""Create a bdev with xNVMe backend.
Args:
filename: path to device or file (ex: /dev/nvme0n1)
name: name of xNVMe bdev to create
io_mechanism: I/O mechanism to use (ex: io_uring, io_uring_cmd, etc.)
Returns:
Name of created bdev.
"""
params = {'name': name,
'filename': filename,
'io_mechanism': io_mechanism}
return client.call('bdev_xnvme_create', params)
def bdev_xnvme_delete(client, name):
"""Delete a xNVMe bdev.
Args:
name: name of xNVMe bdev to delete
"""
params = {'name': name}
return client.call('bdev_xnvme_delete', params)
def bdev_nvme_set_options(client, action_on_timeout=None, timeout_us=None, timeout_admin_us=None,
keep_alive_timeout_ms=None, retry_count=None, arbitration_burst=None,
low_priority_weight=None, medium_priority_weight=None, high_priority_weight=None,


@ -468,6 +468,26 @@ if __name__ == "__main__":
p.add_argument('name', help='uring bdev name')
p.set_defaults(func=bdev_uring_delete)
def bdev_xnvme_create(args):
print_json(rpc.bdev.bdev_xnvme_create(args.client,
filename=args.filename,
name=args.name,
io_mechanism=args.io_mechanism))
p = subparsers.add_parser('bdev_xnvme_create', help='Create a bdev with xNVMe backend')
p.add_argument('filename', help='Path to device or file (ex: /dev/nvme0n1)')
p.add_argument('name', help='name of xNVMe bdev to create')
p.add_argument('io_mechanism', help='IO mechanism to use (ex: libaio, io_uring, io_uring_cmd, etc.)')
p.set_defaults(func=bdev_xnvme_create)
def bdev_xnvme_delete(args):
rpc.bdev.bdev_xnvme_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_xnvme_delete', help='Delete an xNVMe bdev')
p.add_argument('name', help='xNVMe bdev name')
p.set_defaults(func=bdev_xnvme_delete)
def bdev_nvme_set_options(args):
rpc.bdev.bdev_nvme_set_options(args.client,
action_on_timeout=args.action_on_timeout,


@ -50,3 +50,7 @@ test/app/fuzz/llvm_nvme_fuzz/llvm_nvme_fuzz
# used for the main build and scanbuild tests.
module/bdev/daos/bdev_daos
module/bdev/daos/bdev_daos_rpc
# Not configured to test xNVMe bdev
module/bdev/xnvme/bdev_xnvme
module/bdev/xnvme/bdev_xnvme_rpc

xnvme Submodule

@ -0,0 +1 @@
Subproject commit e507f2d1791163d0c2757f1b973cd2cf6dfcb573

xnvmebuild/Makefile

@ -0,0 +1,23 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Samsung Electronics Co., Ltd.
# All rights reserved.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
.PHONY: all clean install uninstall
all:
(cd $(SPDK_ROOT_DIR)/xnvme && \
meson setup builddir -Dwith-spdk=false -Dwith-fio=false -Dshared_library=false && \
meson compile -C builddir && \
cd -)
install: all
uninstall:
@:
clean:
(cd $(SPDK_ROOT_DIR)/xnvme && rm -fr builddir || true && cd -)