spdk/module/bdev/malloc/bdev_malloc.c
Shuhei Matsumoto e6b2b9075a bdev/malloc: Use options structure to create a malloc bdev
Define an options structure, malloc_bdev_opts, and use it directly for
the bdev_malloc_create RPC. To do this, bdev_malloc.h now includes
bdev_module.h instead of bdev.h so that struct spdk_uuid is defined,
and struct malloc_bdev_opts holds an instance of struct spdk_uuid.
Clean up the header inclusions accordingly. Furthermore, use
spdk_uuid_copy() to copy the UUID from malloc_bdev_opts to the malloc
disk rather than the = operator, and remove a duplicated size check.

This makes it easier to add more creation parameters later.
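
For reference, a minimal sketch of the options structure and its use, with
field names and types inferred from how create_malloc_disk() consumes them
(the actual definition lives in bdev_malloc.h; values below are illustrative):

  struct malloc_bdev_opts {
          char *name;                     /* optional; auto-named "MallocN" if NULL */
          struct spdk_uuid uuid;          /* all-zero means "generate one" */
          uint64_t num_blocks;
          uint32_t block_size;            /* must be a multiple of 512 */
          uint32_t optimal_io_boundary;   /* 0 disables splitting */
  };

  /* Example: create a 4 MiB malloc bdev (8192 blocks of 512 bytes). */
  struct malloc_bdev_opts opts = {};
  struct spdk_bdev *bdev;
  int rc;

  opts.name = "Malloc0";
  opts.block_size = 512;
  opts.num_blocks = 8192;
  rc = create_malloc_disk(&bdev, &opts);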

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Ief25f12586c21b1666180ce10cfc6256ede8eba9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14982
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2022-10-28 06:49:40 +00:00


/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#include "spdk/stdinc.h"
#include "bdev_malloc.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/accel.h"
#include "spdk/string.h"
#include "spdk/log.h"
struct malloc_disk {
struct spdk_bdev disk;
void *malloc_buf;
TAILQ_ENTRY(malloc_disk) link;
};
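/* Per-I/O context carved out of each spdk_bdev_io by the bdev layer (see
 * bdev_malloc_get_ctx_size()); tracks how many accel operations are still
 * outstanding for the I/O. */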
struct malloc_task {
int num_outstanding;
enum spdk_bdev_io_status status;
TAILQ_ENTRY(malloc_task) tailq;
};
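/* Per-thread channel state: an accel framework channel used for copy/fill
 * offload and a poller that completes tasks queued by malloc_complete_task(). */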
struct malloc_channel {
struct spdk_io_channel *accel_channel;
struct spdk_poller *completion_poller;
TAILQ_HEAD(, malloc_task) completed_tasks;
};
static void
malloc_done(void *ref, int status)
{
struct malloc_task *task = (struct malloc_task *)ref;
if (status != 0) {
if (status == -ENOMEM) {
task->status = SPDK_BDEV_IO_STATUS_NOMEM;
} else {
task->status = SPDK_BDEV_IO_STATUS_FAILED;
}
}
if (--task->num_outstanding == 0) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
}
}
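/* Queue a task so the channel's completion poller finishes it, rather than
 * completing it inline in the submission path. */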
static void
malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
enum spdk_bdev_io_status status)
{
task->status = status;
TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
}
static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);
int malloc_disk_count = 0;
static int bdev_malloc_initialize(void);
static void bdev_malloc_deinitialize(void);
static int
bdev_malloc_get_ctx_size(void)
{
return sizeof(struct malloc_task);
}
static struct spdk_bdev_module malloc_if = {
.name = "malloc",
.module_init = bdev_malloc_initialize,
.module_fini = bdev_malloc_deinitialize,
.get_ctx_size = bdev_malloc_get_ctx_size,
};
SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)
static void
malloc_disk_free(struct malloc_disk *malloc_disk)
{
if (!malloc_disk) {
return;
}
free(malloc_disk->disk.name);
spdk_free(malloc_disk->malloc_buf);
free(malloc_disk);
}
static int
bdev_malloc_destruct(void *ctx)
{
struct malloc_disk *malloc_disk = ctx;
TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
malloc_disk_free(malloc_disk);
return 0;
}
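/* Return non-zero if the iovecs are too small to hold nbytes. */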
static int
bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
int i;
for (i = 0; i < iovcnt; i++) {
if (nbytes < iovs[i].iov_len) {
return 0;
}
nbytes -= iovs[i].iov_len;
}
return nbytes != 0;
}
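/* Read: copy data from the malloc buffer into the request's iovecs, one accel
 * copy per iovec. */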
static void
bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
struct malloc_task *task,
struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
int64_t res = 0;
void *src = mdisk->malloc_buf + offset;
int i;
if (bdev_malloc_check_iov_len(iov, iovcnt, len)) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
SPDK_BDEV_IO_STATUS_FAILED);
return;
}
SPDK_DEBUGLOG(bdev_malloc, "read %zu bytes from offset %#" PRIx64 ", iovcnt=%d\n",
len, offset, iovcnt);
task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
task->num_outstanding = 0;
for (i = 0; i < iovcnt; i++) {
task->num_outstanding++;
res = spdk_accel_submit_copy(ch, iov[i].iov_base,
src, iov[i].iov_len, 0, malloc_done, task);
if (res != 0) {
malloc_done(task, res);
break;
}
src += iov[i].iov_len;
len -= iov[i].iov_len;
}
}
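/* Write: copy data from the request's iovecs into the malloc buffer, one accel
 * copy per iovec. */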
static void
bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
struct malloc_task *task,
struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
int64_t res = 0;
void *dst = mdisk->malloc_buf + offset;
int i;
if (bdev_malloc_check_iov_len(iov, iovcnt, len)) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
SPDK_BDEV_IO_STATUS_FAILED);
return;
}
SPDK_DEBUGLOG(bdev_malloc, "wrote %zu bytes to offset %#" PRIx64 ", iovcnt=%d\n",
len, offset, iovcnt);
task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
task->num_outstanding = 0;
for (i = 0; i < iovcnt; i++) {
task->num_outstanding++;
res = spdk_accel_submit_copy(ch, dst, iov[i].iov_base,
iov[i].iov_len, 0, malloc_done, task);
if (res != 0) {
malloc_done(task, res);
break;
}
dst += iov[i].iov_len;
}
}
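/* Unmap (also used for write-zeroes): submit a single accel fill of zero over
 * the requested range. */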
static int
bdev_malloc_unmap(struct malloc_disk *mdisk,
struct spdk_io_channel *ch,
struct malloc_task *task,
uint64_t offset,
uint64_t byte_count)
{
task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
task->num_outstanding = 1;
return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
byte_count, 0, malloc_done, task);
}
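/* Translate one bdev_io into accel operations or an immediate completion; a
 * non-zero return means the I/O could not be submitted. */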
static int
_bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
{
uint32_t block_size = bdev_io->bdev->blocklen;
switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ:
if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
assert(bdev_io->u.bdev.iovcnt == 1);
bdev_io->u.bdev.iovs[0].iov_base =
((struct malloc_disk *)bdev_io->bdev->ctxt)->malloc_buf +
bdev_io->u.bdev.offset_blocks * block_size;
bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
}
bdev_malloc_readv((struct malloc_disk *)bdev_io->bdev->ctxt,
mch->accel_channel,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.num_blocks * block_size,
bdev_io->u.bdev.offset_blocks * block_size);
return 0;
case SPDK_BDEV_IO_TYPE_WRITE:
bdev_malloc_writev((struct malloc_disk *)bdev_io->bdev->ctxt,
mch->accel_channel,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.num_blocks * block_size,
bdev_io->u.bdev.offset_blocks * block_size);
return 0;
case SPDK_BDEV_IO_TYPE_RESET:
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
case SPDK_BDEV_IO_TYPE_FLUSH:
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
case SPDK_BDEV_IO_TYPE_UNMAP:
return bdev_malloc_unmap((struct malloc_disk *)bdev_io->bdev->ctxt,
mch->accel_channel,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.bdev.offset_blocks * block_size,
bdev_io->u.bdev.num_blocks * block_size);
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
/* bdev_malloc_unmap submits an accel fill of zero, which zeroes out all of the requested bytes. */
return bdev_malloc_unmap((struct malloc_disk *)bdev_io->bdev->ctxt,
mch->accel_channel,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.bdev.offset_blocks * block_size,
bdev_io->u.bdev.num_blocks * block_size);
case SPDK_BDEV_IO_TYPE_ZCOPY:
if (bdev_io->u.bdev.zcopy.start) {
void *buf;
size_t len;
buf = ((struct malloc_disk *)bdev_io->bdev->ctxt)->malloc_buf +
bdev_io->u.bdev.offset_blocks * block_size;
len = bdev_io->u.bdev.num_blocks * block_size;
spdk_bdev_io_set_buf(bdev_io, buf, len);
}
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
case SPDK_BDEV_IO_TYPE_ABORT:
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_FAILED);
return 0;
default:
return -1;
}
return 0;
}
static void
bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);
if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
SPDK_BDEV_IO_STATUS_FAILED);
}
}
static bool
bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
switch (io_type) {
case SPDK_BDEV_IO_TYPE_READ:
case SPDK_BDEV_IO_TYPE_WRITE:
case SPDK_BDEV_IO_TYPE_FLUSH:
case SPDK_BDEV_IO_TYPE_RESET:
case SPDK_BDEV_IO_TYPE_UNMAP:
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
case SPDK_BDEV_IO_TYPE_ZCOPY:
case SPDK_BDEV_IO_TYPE_ABORT:
return true;
default:
return false;
}
}
static struct spdk_io_channel *
bdev_malloc_get_io_channel(void *ctx)
{
return spdk_get_io_channel(&g_malloc_disks);
}
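/* Emit the bdev_malloc_create RPC call needed to recreate this bdev when the
 * configuration is saved. */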
static void
bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
char uuid_str[SPDK_UUID_STRING_LEN];
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "method", "bdev_malloc_create");
spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_string(w, "name", bdev->name);
spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &bdev->uuid);
spdk_json_write_named_string(w, "uuid", uuid_str);
spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);
spdk_json_write_object_end(w);
spdk_json_write_object_end(w);
}
static const struct spdk_bdev_fn_table malloc_fn_table = {
.destruct = bdev_malloc_destruct,
.submit_request = bdev_malloc_submit_request,
.io_type_supported = bdev_malloc_io_type_supported,
.get_io_channel = bdev_malloc_get_io_channel,
.write_config_json = bdev_malloc_write_json_config,
};
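/* Create and register a malloc bdev described by opts; on success *bdev points
 * to the new bdev. */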
int
create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
{
struct malloc_disk *mdisk;
int rc;
assert(opts != NULL);
if (opts->num_blocks == 0) {
SPDK_ERRLOG("Disk num_blocks must be greater than 0");
return -EINVAL;
}
if (opts->block_size % 512) {
SPDK_ERRLOG("block size must be 512 bytes aligned\n");
return -EINVAL;
}
mdisk = calloc(1, sizeof(*mdisk));
if (!mdisk) {
SPDK_ERRLOG("mdisk calloc() failed\n");
return -ENOMEM;
}
/*
* Allocate the large backend memory buffer from pinned memory.
*
* TODO: need to pass a hint so we know which socket to allocate
* from on multi-socket systems.
*/
mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * opts->block_size, 2 * 1024 * 1024, NULL,
SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
if (!mdisk->malloc_buf) {
SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
malloc_disk_free(mdisk);
return -ENOMEM;
}
if (opts->name) {
mdisk->disk.name = strdup(opts->name);
} else {
/* Auto-generate a name */
mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
malloc_disk_count++;
}
if (!mdisk->disk.name) {
malloc_disk_free(mdisk);
return -ENOMEM;
}
mdisk->disk.product_name = "Malloc disk";
mdisk->disk.write_cache = 1;
mdisk->disk.blocklen = opts->block_size;
mdisk->disk.blockcnt = opts->num_blocks;
if (opts->optimal_io_boundary) {
mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
mdisk->disk.split_on_optimal_io_boundary = true;
}
if (!spdk_mem_all_zero(&opts->uuid, sizeof(opts->uuid))) {
spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
} else {
spdk_uuid_generate(&mdisk->disk.uuid);
}
mdisk->disk.ctxt = mdisk;
mdisk->disk.fn_table = &malloc_fn_table;
mdisk->disk.module = &malloc_if;
rc = spdk_bdev_register(&mdisk->disk);
if (rc) {
malloc_disk_free(mdisk);
return rc;
}
*bdev = &(mdisk->disk);
TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);
return rc;
}
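/* Unregister the named malloc bdev; cb_fn is called when the deletion
 * completes, or immediately with an error if the lookup fails. */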
void
delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
{
int rc;
rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
if (rc != 0) {
cb_fn(cb_arg, rc);
}
}
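/* Drain the tasks queued by malloc_complete_task() and complete them to the
 * bdev layer. */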
static int
malloc_completion_poller(void *ctx)
{
struct malloc_channel *ch = ctx;
struct malloc_task *task;
TAILQ_HEAD(, malloc_task) completed_tasks;
uint32_t num_completions = 0;
TAILQ_INIT(&completed_tasks);
TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);
while (!TAILQ_EMPTY(&completed_tasks)) {
task = TAILQ_FIRST(&completed_tasks);
TAILQ_REMOVE(&completed_tasks, task, tailq);
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
num_completions++;
}
return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static int
malloc_create_channel_cb(void *io_device, void *ctx)
{
struct malloc_channel *ch = ctx;
ch->accel_channel = spdk_accel_get_io_channel();
if (!ch->accel_channel) {
SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
return -ENOMEM;
}
ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
if (!ch->completion_poller) {
SPDK_ERRLOG("Failed to register malloc completion poller\n");
spdk_put_io_channel(ch->accel_channel);
return -ENOMEM;
}
TAILQ_INIT(&ch->completed_tasks);
return 0;
}
static void
malloc_destroy_channel_cb(void *io_device, void *ctx)
{
struct malloc_channel *ch = ctx;
assert(TAILQ_EMPTY(&ch->completed_tasks));
spdk_put_io_channel(ch->accel_channel);
spdk_poller_unregister(&ch->completion_poller);
}
static int
bdev_malloc_initialize(void)
{
/* This needs to be reset for each reinitialization of submodules.
* Otherwise after enough devices or reinitializations the value gets too high.
* TODO: Make malloc bdev name mandatory and remove this counter. */
malloc_disk_count = 0;
spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
malloc_destroy_channel_cb, sizeof(struct malloc_channel),
"bdev_malloc");
return 0;
}
static void
bdev_malloc_deinitialize(void)
{
spdk_io_device_unregister(&g_malloc_disks, NULL);
}
SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)