/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/env.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

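/*
 * The raid5f implementation is compiled directly into this test via the
 * #include of bdev/raid/raid5f.c above, so the test can reach its static
 * functions (raid5f_start, raid5f_submit_rw_request, ...) without a public
 * header. The stubs below replace the raid/bdev-layer symbols it calls into.
 */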
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));

void *
spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
{
	return bdev_io->u.bdev.md_buf;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

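/*
 * Minimal stand-ins for the raid bdev completion path: raid_bdev_io_complete()
 * forwards straight to the bdev_io's completion callback, and
 * raid_bdev_io_complete_part() tracks partial completions, recording any
 * non-success status and completing the parent I/O once all parts are done.
 */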
void
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

	if (bdev_io->internal.cb) {
		bdev_io->internal.cb(bdev_io, status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->internal.caller_ctx);
	}
}

bool
raid_bdev_io_complete_part(struct raid_bdev_io *raid_io, uint64_t completed,
			   enum spdk_bdev_io_status status)
{
	assert(raid_io->base_bdev_io_remaining >= completed);
	raid_io->base_bdev_io_remaining -= completed;

	if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
		raid_io->base_bdev_io_status = status;
	}

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_bdev_io_complete(raid_io, raid_io->base_bdev_io_status);
		return true;
	} else {
		return false;
	}
}

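/*
 * test_setup() builds the test parameter matrix: every combination of base
 * bdev count, block count, block length, strip size and metadata length.
 * Combinations where the strip would be zero blocks or larger than a base
 * bdev are skipped, since no raid5f can be created from them.
 */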
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint32_t md_len_values[] = { 0, 64 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	uint32_t *md_len;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_len_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_len_values, md_len) {
						params.num_base_bdevs = *num_base_bdevs;
						params.base_bdev_blockcnt = *base_bdev_blockcnt;
						params.base_bdev_blocklen = *base_bdev_blocklen;
						params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
						params.md_len = *md_len;
						if (params.strip_size == 0 ||
						    params.strip_size > *base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}

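/*
 * Create a raid5f bdev from one parameter set and start the module on it;
 * delete_raid5f() undoes both steps.
 */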
static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

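/*
 * Verifies the geometry raid5f_start() derives for each configuration:
 * stripe_blocks spans the N - 1 data chunks, total_stripes is the base bdev
 * block count divided by the strip size, and the exposed block count excludes
 * trailing blocks that do not fill a whole strip.
 */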
static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

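/*
 * Per-test-I/O bookkeeping. error.type selects where a failure is injected
 * into a request: at submission (-EINVAL), at completion, or as -ENOMEM, in
 * which case the request is expected to be re-queued and retried through
 * bdev_io_wait_queue.
 */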
enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	enum spdk_bdev_io_status status;
	bool failed;
	int remaining;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	char bdev_io_buf[sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)];
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct raid_io_info *io_info;

	io_info = ((struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io))->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_bdev_io_completion_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_io_info *io_info = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		io_info->failed = true;
	}

	if (--io_info->remaining == 0) {
		if (io_info->failed) {
			io_info->status = SPDK_BDEV_IO_STATUS_FAILED;
		} else {
			io_info->status = SPDK_BDEV_IO_STATUS_SUCCESS;
		}
	}
}

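/*
 * Builds a fake spdk_bdev_io/raid_bdev_io pair for one split of the test I/O.
 * For reads, the destination buffer is wired into the iovec and the source
 * buffer is what the read stub copies from; for writes the roles are
 * reversed. A single iovec covers the whole split.
 */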
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info, uint64_t offset_blocks_split, uint64_t num_blocks)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	void *src_buf = io_info->src_buf + offset_blocks_split * blocklen;
	void *dest_buf = io_info->dest_buf + offset_blocks_split * blocklen;
	void *src_md_buf = io_info->src_md_buf + offset_blocks_split * raid_bdev->bdev.md_len;
	void *dest_md_buf = io_info->dest_md_buf + offset_blocks_split * raid_bdev->bdev.md_len;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io->bdev_io_buf == (char *)test_raid_bdev_io);
	bdev_io = (struct spdk_bdev_io *)test_raid_bdev_io->bdev_io_buf;
	bdev_io->bdev = &raid_bdev->bdev;
	bdev_io->type = io_info->io_type;
	bdev_io->u.bdev.offset_blocks = io_info->offset_blocks + offset_blocks_split;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->internal.cb = raid_bdev_io_completion_cb;
	bdev_io->internal.caller_ctx = io_info;

	raid_io = (void *)bdev_io->driver_ctx;
	raid_io->raid_bdev = raid_bdev;
	raid_io->raid_ch = io_info->raid_ch;
	raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = src_buf;
		test_raid_bdev_io->buf_md = src_md_buf;
		bdev_io->u.bdev.md_buf = dest_md_buf;
		bdev_io->iov.iov_base = dest_buf;
	} else {
		test_raid_bdev_io->buf = dest_buf;
		test_raid_bdev_io->buf_md = dest_md_buf;
		bdev_io->u.bdev.md_buf = src_md_buf;
		bdev_io->iov.iov_base = src_buf;
	}

	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->iov.iov_len = num_blocks * blocklen;

	io_info->remaining++;

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

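/*
 * Common submission path for the read/write stubs. Depending on the injected
 * error it either fails the submission (-EINVAL), reports -ENOMEM, or queues
 * the bdev_io so process_io_completions() can complete it later.
 */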
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

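/*
 * Drains the queued bdev_ios, completing each one (with failure if a
 * completion error is injected for its bdev). If -ENOMEM was injected, the
 * injection is cleared, the queued wait entries are retried, and the
 * resulting submissions are drained recursively.
 */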
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

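/*
 * Converts a data-buffer byte offset to the corresponding metadata-buffer
 * offset: one md_len-sized entry per block of data.
 */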
#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) ((data_offset >> raid_bdev->blocklen_shift) * raid_bdev->bdev.md_len)

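/*
 * Write stub: instead of writing to a real bdev, copy the chunk's data into
 * the test buffers. The parity chunk goes into parity_buf (if the test
 * captures parity), data chunks go to the position in the destination buffer
 * matching their stripe and chunk index; metadata is copied alongside.
 */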
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint64_t stripe_idx_off;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	void *dest_buf, *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_write_complete_bdev_io);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(stripe_req->raid_io);
	io_info = test_raid_bdev_io->io_info;

	raid_bdev = io_info->r5f_info->raid_bdev;

	stripe_idx_off = offset_blocks / raid_bdev->strip_size -
			 io_info->offset_blocks / io_info->r5f_info->stripe_blocks;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		data_offset = stripe_idx_off * raid_bdev->strip_size_kb * 1024;
		dest_buf = io_info->parity_buf + data_offset;

		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = io_info->parity_md_buf + data_offset;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = (stripe_idx_off * io_info->r5f_info->stripe_blocks +
			       data_chunk_idx * raid_bdev->strip_size) *
			      raid_bdev->bdev.blocklen;
		dest_buf = test_raid_bdev_io->buf + data_offset;

		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}

	memcpy(dest_buf, iov->iov_base, iov->iov_len);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, DATA_OFFSET_TO_MD_OFFSET(raid_bdev, iov->iov_len));
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

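/*
 * Read stub: satisfy the request immediately by copying from the test's
 * source buffer (and source metadata buffer) into the caller's iovec.
 */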
int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct test_raid_bdev_io *test_raid_bdev_io;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io);

	memcpy(iov->iov_base, test_raid_bdev_io->buf, iov->iov_len);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, DATA_OFFSET_TO_MD_OFFSET(raid_io->raid_bdev,
				iov->iov_len));
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

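/* XOR "b" into "a" byte by byte; used to compute the reference parity. */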
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

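/*
 * Submits a single full-stripe write and, on success, compares the parity
 * (and parity metadata) captured by the write stub against the reference
 * computed in io_info_setup_parity().
 */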
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info, 0, io_info->num_blocks);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

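/*
 * Splits the test I/O on strip boundaries, mirroring how the bdev layer
 * would split on the optimal I/O boundary, and submits one raid I/O per
 * split before draining completions.
 */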
static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	uint32_t strip_size = io_info->r5f_info->raid_bdev->strip_size;
	uint64_t num_blocks = io_info->num_blocks;
	uint64_t offset_blocks_split = 0;

	while (num_blocks) {
		uint64_t chunk_offset = offset_blocks_split % strip_size;
		uint64_t num_blocks_split = spdk_min(num_blocks, strip_size - chunk_offset);
		struct raid_bdev_io *raid_io;

		raid_io = get_raid_io(io_info, offset_blocks_split, num_blocks_split);

		raid5f_submit_rw_request(raid_io);

		num_blocks -= num_blocks_split;
		offset_blocks_split += num_blocks_split;
	}

	process_io_completions(io_info);
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
}

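/*
 * Allocates and patterns the source data/metadata buffers (a block index at
 * the start of each data block, a byte counter in the metadata) so that data
 * copied to the wrong location is detectable, and zeroes the rest of the
 * bookkeeping.
 */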
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->offset_blocks = offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

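/*
 * Computes the expected parity for every stripe of the write request by
 * XOR-ing its data strips together, for both data and metadata, and
 * allocates the buffers the write stub will capture the actual parity into.
 */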
static void
io_info_setup_parity(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint64_t num_stripes = io_info->num_blocks / r5f_info->stripe_blocks;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
	void *src = io_info->src_buf;
	void *dest;
	unsigned i, j;

	io_info->parity_buf_size = num_stripes * strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	dest = io_info->reference_parity;
	for (i = 0; i < num_stripes; i++) {
		for (j = 0; j < raid5f_stripe_data_chunks_num(raid_bdev); j++) {
			xor_block(dest, src, strip_len);
			src += strip_len;
		}
		dest += strip_len;
	}

	io_info->parity_md_buf_size = num_stripes * strip_md_len;
	io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

	io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

	src = io_info->src_md_buf;
	dest = io_info->reference_md_parity;
	for (i = 0; i < num_stripes; i++) {
		for (j = 0; j < raid5f_stripe_data_chunks_num(raid_bdev); j++) {
			xor_block(dest, src, strip_md_len);
			src += strip_md_len;
		}
		dest += strip_md_len;
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	uint64_t offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);

	deinit_io_info(&io_info);
}

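/*
 * Runs test_fn against every raid5f configuration from the parameter matrix,
 * setting up the raid bdev, its per-base-bdev channels and the raid5f module
 * channel around each invocation.
 */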
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel raid_ch = { 0 };

		r5f_info = create_raid5f(params);

		raid_ch.num_channels = params->num_base_bdevs;
		raid_ch.base_channel = calloc(params->num_base_bdevs, sizeof(struct spdk_io_channel *));
		SPDK_CU_ASSERT_FATAL(raid_ch.base_channel != NULL);

		raid_ch.module_channel = raid5f_get_io_channel(r5f_info->raid_bdev);
		SPDK_CU_ASSERT_FATAL(raid_ch.module_channel);

		test_fn(r5f_info->raid_bdev, &raid_ch);

		spdk_put_io_channel(raid_ch.module_channel);
		poll_threads();

		free(raid_ch.base_channel);

		delete_raid5f(r5f_info);
	}
}

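/*
 * Iterate over at most num_base_bdevs stripes (capped at total_stripes) —
 * enough to cover each parity chunk position without walking every stripe.
 */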
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

struct test_request_conf {
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
};

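/*
 * Read cases cover the interesting offset/length combinations relative to
 * the strip size: strip-aligned, strip-crossing, single-block and
 * whole-stripe requests.
 */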
static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	unsigned int i;

	struct test_request_conf test_requests[] = {
		{ 0, 1 },
		{ 0, strip_size },
		{ 0, strip_size + 1 },
		{ 0, r5f_info->stripe_blocks },
		{ 1, 1 },
		{ 1, strip_size },
		{ 1, strip_size + 1 },
		{ strip_size, 1 },
		{ strip_size, strip_size },
		{ strip_size, strip_size + 1 },
		{ strip_size - 1, 1 },
		{ strip_size - 1, strip_size },
		{ strip_size - 1, strip_size + 1 },
		{ strip_size - 1, 2 },
	};
	for (i = 0; i < SPDK_COUNTOF(test_requests); i++) {
		struct test_request_conf *t = &test_requests[i];
		uint64_t stripe_index;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, t->stripe_offset_blocks, t->num_blocks);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

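/*
 * Exercises raid5f_stripe_request_map_iovecs() with a hand-built iovec
 * layout and checks that each data chunk ends up pointing at the right iovec
 * slices; chunk boundaries fall mid-iovec in iovs[2], so it must be split.
 */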
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid5f_io_channel *r5ch = spdk_io_channel_get_ctx(raid_ch->module_channel);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_io_info io_info;
	struct raid_bdev_io *raid_io;
	struct spdk_bdev_io *bdev_io;
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE, 0, 0);

	raid_io = get_raid_io(&io_info, 0, 0);
	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	bdev_io->u.bdev.iovs = iovs;
	bdev_io->u.bdev.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
	spdk_bdev_free_io(bdev_io);
	deinit_io_info(&io_info);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

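/*
 * Injects each error type into each base bdev in turn during a full-stripe
 * write. Submission and completion errors must fail the raid I/O; -ENOMEM
 * must be retried transparently and still succeed.
 */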
static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

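/*
 * Variant of the write-error test: while one chunk is being retried after
 * -ENOMEM, a second error is injected on the last base bdev from the enomem
 * callback, so the retried request must still end up failed.
 */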
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("raid5f", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}