bdev/raid: enable unmap support

Change-Id: If0e3c483ce16680ecea0252c389e134c59b2793e
Signed-off-by: Xiaodong Liu <xiaodong.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/441309
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Xiaodong Liu <xiaodong.liu@intel.com>, 2019-01-21 21:34:21 +08:00
Committed by: Changpeng Liu
parent 48b4a2545a
commit 3d951cd321
4 changed files with 646 additions and 0 deletions


@@ -151,6 +151,7 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
run_test suite test/bdev/blockdev.sh
run_test suite test/bdev/bdev_raid.sh
fi
if [ $SPDK_TEST_JSON -eq 1 ]; then


@@ -595,6 +595,185 @@ _raid_bdev_submit_reset_request(struct spdk_io_channel *ch, struct spdk_bdev_io
_raid_bdev_submit_reset_request_next(bdev_io);
}
/* raid0 IO range */
struct raid_bdev_io_range {
uint64_t strip_size;
uint64_t start_strip_in_disk;
uint64_t end_strip_in_disk;
uint64_t start_offset_in_strip;
uint64_t end_offset_in_strip;
uint64_t start_disk;
uint64_t end_disk;
uint64_t n_disks_involved;
};
static inline void
_raid_bdev_get_io_range(struct raid_bdev_io_range *io_range,
uint64_t num_base_bdevs, uint64_t strip_size, uint64_t strip_size_shift,
uint64_t offset_blocks, uint64_t num_blocks)
{
uint64_t start_strip;
uint64_t end_strip;
io_range->strip_size = strip_size;
/* The start and end strip index in raid0 bdev scope */
start_strip = offset_blocks >> strip_size_shift;
end_strip = (offset_blocks + num_blocks - 1) >> strip_size_shift;
io_range->start_strip_in_disk = start_strip / num_base_bdevs;
io_range->end_strip_in_disk = end_strip / num_base_bdevs;
/* The first strip may start at an unaligned LBA offset, and the end strip
 * may end at an unaligned LBA offset. Any strips between them are aligned
 * to strip boundaries in both offset and length.
 */
io_range->start_offset_in_strip = offset_blocks % strip_size;
io_range->end_offset_in_strip = (offset_blocks + num_blocks - 1) % strip_size;
/* The base bdev indexes in which start and end strips are located */
io_range->start_disk = start_strip % num_base_bdevs;
io_range->end_disk = end_strip % num_base_bdevs;
/* Calculate how many base bdevs are involved in the unmap operation.
 * The number involved is between 1 and num_base_bdevs; it is 1 when the
 * first strip and the last strip are the same one.
 */
io_range->n_disks_involved = (end_strip - start_strip + 1);
io_range->n_disks_involved = spdk_min(io_range->n_disks_involved, num_base_bdevs);
}
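/* Worked example with illustrative values (not from this patch): for
 * num_base_bdevs = 4, strip_size = 128 blocks (strip_size_shift = 7), and an
 * unmap of num_blocks = 400 starting at offset_blocks = 100:
 *   start_strip = 100 >> 7 = 0;              end_strip = 499 >> 7 = 3
 *   start_strip_in_disk = 0 / 4 = 0;         end_strip_in_disk = 3 / 4 = 0
 *   start_offset_in_strip = 100 % 128 = 100; end_offset_in_strip = 499 % 128 = 115
 *   start_disk = 0 % 4 = 0;                  end_disk = 3 % 4 = 3
 *   n_disks_involved = spdk_min(3 - 0 + 1, 4) = 4
 */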
static inline void
_raid_bdev_split_io_range(struct raid_bdev_io_range *io_range, uint64_t disk_idx,
uint64_t *_offset_in_disk, uint64_t *_nblocks_in_disk)
{
uint64_t n_strips_in_disk;
uint64_t start_offset_in_disk;
uint64_t end_offset_in_disk;
uint64_t offset_in_disk;
uint64_t nblocks_in_disk;
uint64_t start_strip_in_disk;
uint64_t end_strip_in_disk;
start_strip_in_disk = io_range->start_strip_in_disk;
if (disk_idx < io_range->start_disk) {
start_strip_in_disk += 1;
}
end_strip_in_disk = io_range->end_strip_in_disk;
if (disk_idx > io_range->end_disk) {
end_strip_in_disk -= 1;
}
assert(end_strip_in_disk >= start_strip_in_disk);
n_strips_in_disk = end_strip_in_disk - start_strip_in_disk + 1;
if (disk_idx == io_range->start_disk) {
start_offset_in_disk = io_range->start_offset_in_strip;
} else {
start_offset_in_disk = 0;
}
if (disk_idx == io_range->end_disk) {
end_offset_in_disk = io_range->end_offset_in_strip;
} else {
end_offset_in_disk = io_range->strip_size - 1;
}
offset_in_disk = start_offset_in_disk + start_strip_in_disk * io_range->strip_size;
nblocks_in_disk = (n_strips_in_disk - 1) * io_range->strip_size
+ end_offset_in_disk - start_offset_in_disk + 1;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID,
"raid_bdev (strip_size 0x%lx) splits IO to base_bdev (%lu) at (0x%lx, 0x%lx).\n",
io_range->strip_size, disk_idx, offset_in_disk, nblocks_in_disk);
*_offset_in_disk = offset_in_disk;
*_nblocks_in_disk = nblocks_in_disk;
}
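/* Continuing the worked example above, _raid_bdev_split_io_range() yields:
 *   disk 0 (start_disk): offset_in_disk = 100, nblocks_in_disk = 28
 *   disk 1:              offset_in_disk = 0,   nblocks_in_disk = 128
 *   disk 2:              offset_in_disk = 0,   nblocks_in_disk = 128
 *   disk 3 (end_disk):   offset_in_disk = 0,   nblocks_in_disk = 116
 * The per-disk counts sum to 28 + 128 + 128 + 116 = 400, the parent IO's
 * num_blocks.
 */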
/*
* brief:
* _raid_bdev_submit_unmap_request_next function submits the next batch of unmap
* requests to the member disks; it submits as many as possible, and if a
* submission fails with -ENOMEM it queues the parent IO for later resubmission
* params:
* bdev_io - pointer to parent bdev_io on raid bdev device
* returns:
* none
*/
static void
_raid_bdev_submit_unmap_request_next(void *_bdev_io)
{
struct spdk_bdev_io *bdev_io = _bdev_io;
struct raid_bdev_io *raid_io;
struct raid_bdev *raid_bdev;
struct raid_bdev_io_channel *raid_ch;
struct raid_bdev_io_range io_range;
int ret;
raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
raid_ch = spdk_io_channel_get_ctx(raid_io->ch);
_raid_bdev_get_io_range(&io_range, raid_bdev->num_base_bdevs,
raid_bdev->strip_size, raid_bdev->strip_size_shift,
bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);
raid_io->base_bdev_io_expected = io_range.n_disks_involved;
while (raid_io->base_bdev_io_submitted < raid_io->base_bdev_io_expected) {
uint64_t disk_idx;
uint64_t offset_in_disk;
uint64_t nblocks_in_disk;
/* Base IOs are submitted from start_disk through end_disk, wrapping around
 * the base bdev array, so the index of start_disk may be larger than
 * end_disk's.
 */
disk_idx = (io_range.start_disk + raid_io->base_bdev_io_submitted) % raid_bdev->num_base_bdevs;
_raid_bdev_split_io_range(&io_range, disk_idx, &offset_in_disk, &nblocks_in_disk);
ret = spdk_bdev_unmap_blocks(raid_bdev->base_bdev_info[disk_idx].desc,
raid_ch->base_channel[disk_idx],
offset_in_disk, nblocks_in_disk,
raid_bdev_base_io_completion, bdev_io);
if (ret == 0) {
raid_io->base_bdev_io_submitted++;
} else {
raid_bdev_base_io_submit_fail_process(bdev_io, disk_idx,
_raid_bdev_submit_unmap_request_next, ret);
return;
}
}
}
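/* raid_bdev_base_io_submit_fail_process() is defined earlier in this file
 * (outside this hunk). Based on the brief above, it presumably completes the
 * parent bdev_io with failure on hard errors and, for ret == -ENOMEM, queues
 * the parent IO so the given callback (_raid_bdev_submit_unmap_request_next)
 * is retried later. Since base_bdev_io_submitted is only incremented on
 * success, the retry resumes from the disk whose submission failed.
 */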
/*
* brief:
* _raid_bdev_submit_unmap_request function is the submit_request function for
* unmap requests
* params:
* ch - pointer to raid bdev io channel
* bdev_io - pointer to parent bdev_io on raid bdev device
* returns:
* none
*/
static void
_raid_bdev_submit_unmap_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct raid_bdev_io *raid_io;
raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
raid_io->ch = ch;
raid_io->base_bdev_io_submitted = 0;
raid_io->base_bdev_io_completed = 0;
raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev unmap (0x%lx, 0x%lx)\n",
bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);
_raid_bdev_submit_unmap_request_next(bdev_io);
}
/*
* brief:
* Callback function to spdk_bdev_io_get_buf.
@@ -654,6 +833,10 @@ raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
_raid_bdev_submit_reset_request(ch, bdev_io);
break;
case SPDK_BDEV_IO_TYPE_UNMAP:
_raid_bdev_submit_unmap_request(ch, bdev_io);
break;
default:
SPDK_ERRLOG("submit request, invalid io type %u\n", bdev_io->type);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
@@ -662,6 +845,38 @@ raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
}
/*
* brief:
* raid_bdev_io_type_unmap_supported checks whether unmap is supported by the
* raid bdev module. If any of the base_bdevs does not support unmap, the raid
* bdev does not support it either. A base_bdev that has not been discovered
* yet is assumed to support it.
* params:
* raid_bdev - pointer to raid bdev context
* returns:
* true - io_type is supported
* false - io_type is not supported
*/
static bool
raid_bdev_io_type_unmap_supported(struct raid_bdev *raid_bdev)
{
uint16_t i;
for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
if (raid_bdev->base_bdev_info[i].bdev == NULL) {
assert(false);
continue;
}
if (spdk_bdev_io_type_supported(raid_bdev->base_bdev_info[i].bdev,
SPDK_BDEV_IO_TYPE_UNMAP) == false) {
return false;
}
}
return true;
}
/*
* brief:
* raid_bdev_io_type_supported is the io_supported function for bdev function
@@ -683,6 +898,10 @@ raid_bdev_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
case SPDK_BDEV_IO_TYPE_FLUSH:
case SPDK_BDEV_IO_TYPE_RESET:
return true;
case SPDK_BDEV_IO_TYPE_UNMAP:
return raid_bdev_io_type_unmap_supported(ctx);
default:
return false;
}

test/bdev/bdev_raid.sh (new executable file, 115 lines)

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
set -e
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
rpc_py="$rootdir/scripts/rpc.py"
tmp_file=/tmp/raidrandtest
source $rootdir/test/common/autotest_common.sh
source $testdir/nbd_common.sh
function raid_unmap_data_verify() {
if hash blkdiscard; then
local nbd=$1
local rpc_server=$2
local blksize=$(lsblk -o LOG-SEC $nbd | grep -v LOG-SEC | cut -d ' ' -f 5)
local rw_blk_num=4096
local rw_len=$((blksize * rw_blk_num))
local unmap_blk_offs=(0 1028 321)
local unmap_blk_nums=(128 2035 456)
local unmap_off
local unmap_len
# data write
dd if=/dev/urandom of=$tmp_file bs=$blksize count=$rw_blk_num
dd if=$tmp_file of=$nbd bs=$blksize count=$rw_blk_num oflag=direct
blockdev --flushbufs $nbd
# confirm random data is written correctly in raid0 device
cmp -b -n $rw_len $tmp_file $nbd
for (( i=0; i<${#unmap_blk_offs[@]}; i++ )); do
unmap_off=$((blksize * ${unmap_blk_offs[$i]}))
unmap_len=$((blksize * ${unmap_blk_nums[$i]}))
# data unmap on tmp_file
dd if=/dev/zero of=$tmp_file bs=$blksize seek=${unmap_blk_offs[$i]} count=${unmap_blk_nums[$i]} conv=notrunc
# data unmap on raid bdev
blkdiscard -o $unmap_off -l $unmap_len $nbd
blockdev --flushbufs $nbd
# data verify after unmap
cmp -b -n $rw_len $tmp_file $nbd
done
fi
return 0
}
function on_error_exit() {
if [ ! -z "$raid_pid" ]; then
killprocess $raid_pid
fi
rm -f $testdir/bdev.conf
rm -f $tmp_file
print_backtrace
exit 1
}
function raid_function_test() {
if [ $(uname -s) = Linux ] && modprobe -n nbd; then
local rpc_server=/var/tmp/spdk-raid.sock
local conf=$1
local nbd=/dev/nbd0
local raid_bdev
if [ ! -e $conf ]; then
return 1
fi
modprobe nbd
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 -c ${conf} -L bdev_raid &
raid_pid=$!
echo "Process raid pid: $raid_pid"
waitforlisten $raid_pid $rpc_server
raid_bdev=$($rootdir/scripts/rpc.py -s $rpc_server get_raid_bdevs online | cut -d ' ' -f 1)
if [ "$raid_bdev" = "" ]; then
echo "No raid0 device in SPDK app"
return 1
fi
nbd_start_disks $rpc_server $raid_bdev $nbd
count=$(nbd_get_count $rpc_server)
if [ $count -ne 1 ]; then
return 1
fi
raid_unmap_data_verify $nbd $rpc_server
nbd_stop_disks $rpc_server $nbd
count=$(nbd_get_count $rpc_server)
if [ $count -ne 0 ]; then
return 1
fi
killprocess $raid_pid
fi
return 0
}
timing_enter bdev_raid
trap 'on_error_exit;' ERR
cp $testdir/bdev.conf.in $testdir/bdev.conf
raid_function_test $testdir/bdev.conf
rm -f $testdir/bdev.conf
rm -f $tmp_file
report_test_completion "bdev_raid"
timing_exit bdev_raid


@@ -41,6 +41,7 @@
#define MAX_BASE_DRIVES 255
#define MAX_RAIDS 31
#define INVALID_IO_SUBMIT 0xFFFF
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
/* Data structure to capture the output of IO for verification */
struct io_output {
@@ -53,6 +54,11 @@ struct io_output {
enum spdk_bdev_io_type iotype;
};
struct raid_io_ranges {
uint64_t lba;
uint64_t nblocks;
};
/* Different test options, more options to test can be added here */
uint32_t g_blklen_opts[] = {512, 4096};
uint32_t g_strip_opts[] = {64, 128, 256, 512, 1024, 2048};
@@ -86,6 +92,9 @@ uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_construct;
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
/* Set randomly test options, in every run it is different */
static void
@@ -141,6 +150,7 @@ set_globals(void)
g_json_beg_res_ret_err = 0;
g_json_decode_obj_err = 0;
g_json_decode_obj_construct = 0;
g_lba_offset = 0;
}
static void
@@ -267,6 +277,41 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
return g_bdev_io_submit_status;
}
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg)
{
struct io_output *p = &g_io_output[g_io_output_index];
struct spdk_bdev_io *child_io;
if (g_ignore_io_output) {
return 0;
}
if (g_bdev_io_submit_status == 0) {
p->desc = desc;
p->ch = ch;
p->offset_blocks = offset_blocks;
p->num_blocks = num_blocks;
p->cb = cb;
p->cb_arg = cb_arg;
p->iotype = SPDK_BDEV_IO_TYPE_UNMAP;
g_io_output_index++;
child_io = calloc(1, sizeof(struct spdk_bdev_io));
SPDK_CU_ASSERT_FATAL(child_io != NULL);
cb(child_io, g_child_io_status_flag, cb_arg);
}
return g_bdev_io_submit_status;
}
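/* Note: this stub completes the child IO synchronously - cb() runs inline
 * before spdk_bdev_unmap_blocks() returns - so when raid_bdev_submit_request()
 * returns in a test, every base IO recorded in g_io_output has already
 * completed and can be verified immediately.
 */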
bool
spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
{
return true;
}
void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
@@ -732,6 +777,11 @@ bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
bdev_io->u.bdev.offset_blocks = lba;
bdev_io->u.bdev.num_blocks = blocks;
bdev_io->type = iotype;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
return;
}
bdev_io->u.bdev.iovcnt = 1;
bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
@@ -815,6 +865,112 @@ verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
CU_ASSERT(g_io_comp_status == io_status);
}
static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
uint32_t strip_shift = spdk_u32log2(g_strip_size);
uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
g_strip_size;
uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
strip_shift;
uint32_t n_disks_involved;
uint64_t start_strip_disk_idx;
uint64_t end_strip_disk_idx;
uint64_t nblocks_in_start_disk;
uint64_t offset_in_start_disk;
uint32_t disk_idx;
uint64_t base_io_idx;
uint64_t sum_nblocks = 0;
if (io_status == INVALID_IO_SUBMIT) {
CU_ASSERT(g_io_comp_status == false);
return;
}
SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);
n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
CU_ASSERT(n_disks_involved == g_io_output_index);
start_strip_disk_idx = start_strip % num_base_drives;
end_strip_disk_idx = end_strip % num_base_drives;
offset_in_start_disk = g_io_output[0].offset_blocks;
nblocks_in_start_disk = g_io_output[0].num_blocks;
for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
base_io_idx++, disk_idx++) {
uint64_t start_offset_in_disk;
uint64_t end_offset_in_disk;
/* wrap disk_idx back into [0, num_base_drives) */
if (disk_idx >= num_base_drives) {
disk_idx %= num_base_drives;
}
/* start_offset_in_disk strip-alignment check:
 * The first base IO has the same start_offset_in_strip as the whole raid IO.
 * Every other base IO must start at a strip boundary, i.e. at offset 0 in its strip.
 */
start_offset_in_disk = g_io_output[base_io_idx].offset_blocks;
if (base_io_idx == 0) {
CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
} else {
CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
}
/* end_offset_in_disk strip-alignment check:
 * The base IO on the disk where end_strip is located has the same
 * end_offset_in_strip as the whole raid IO. Every other base IO must end at
 * a strip boundary.
 */
end_offset_in_disk = g_io_output[base_io_idx].offset_blocks +
g_io_output[base_io_idx].num_blocks - 1;
if (disk_idx == end_strip_disk_idx) {
CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
} else {
CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
}
/* start_offset_in_disk compared with offset_in_start_disk:
 * 1. For a disk_idx larger than start_strip_disk_idx, its start_offset_in_disk
 *    must not exceed offset_in_start_disk, and the gap must be less than the
 *    strip size.
 * 2. For a disk_idx smaller than start_strip_disk_idx, its start_offset_in_disk
 *    must be larger than offset_in_start_disk, and the gap must not exceed the
 *    strip size.
 */
if (disk_idx > start_strip_disk_idx) {
CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
} else if (disk_idx < start_strip_disk_idx) {
CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
CU_ASSERT(g_io_output[base_io_idx].offset_blocks - offset_in_start_disk <= g_strip_size);
}
/* num_blocks compared with nblocks_in_start_disk:
 * The difference between them must be within one strip size.
 */
if (g_io_output[base_io_idx].num_blocks <= nblocks_in_start_disk) {
CU_ASSERT(nblocks_in_start_disk - g_io_output[base_io_idx].num_blocks <= g_strip_size);
} else {
CU_ASSERT(g_io_output[base_io_idx].num_blocks - nblocks_in_start_disk < g_strip_size);
}
sum_nblocks += g_io_output[base_io_idx].num_blocks;
CU_ASSERT(ch_ctx->base_channel[disk_idx] == g_io_output[base_io_idx].ch);
CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == g_io_output[base_io_idx].desc);
CU_ASSERT(bdev_io->type == g_io_output[base_io_idx].iotype);
}
/* The sum of nblocks across all base IOs must equal the raid bdev_io's num_blocks */
CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);
CU_ASSERT(g_io_comp_status == io_status);
}
static void
verify_raid_config_present(const char *name, bool presence)
{
@@ -1567,6 +1723,160 @@ test_read_io(void)
reset_globals();
}
static void
raid_bdev_io_generate_by_strips(uint64_t n_strips)
{
uint64_t lba;
uint64_t nblocks;
uint64_t start_offset;
uint64_t end_offset;
uint64_t offsets_in_strip[3];
uint64_t start_bdev_idx;
uint64_t start_bdev_offset;
uint64_t start_bdev_idxs[3];
int i, j, l;
/* 3 different situations of offset in strip */
offsets_in_strip[0] = 0;
offsets_in_strip[1] = g_strip_size >> 1;
offsets_in_strip[2] = g_strip_size - 1;
/* 3 different situations of start_bdev_idx */
start_bdev_idxs[0] = 0;
start_bdev_idxs[1] = g_max_base_drives >> 1;
start_bdev_idxs[2] = g_max_base_drives - 1;
/* consider different start and end offsets within a strip */
for (i = 0; i < 3; i++) {
start_offset = offsets_in_strip[i];
for (j = 0; j < 3; j++) {
end_offset = offsets_in_strip[j];
if (n_strips == 1 && start_offset > end_offset) {
continue;
}
/* consider at which base_bdev the LBA starts */
for (l = 0; l < 3; l++) {
start_bdev_idx = start_bdev_idxs[l];
start_bdev_offset = start_bdev_idx * g_strip_size;
lba = g_lba_offset + start_bdev_offset + start_offset;
nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
g_io_ranges[g_io_range_idx].lba = lba;
g_io_ranges[g_io_range_idx].nblocks = nblocks;
SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
g_io_range_idx++;
}
}
}
}
static void
raid_bdev_io_generate(void)
{
uint64_t n_strips;
uint64_t n_strips_span = g_max_base_drives;
uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1, g_max_base_drives * 2,
g_max_base_drives * 3, g_max_base_drives * 4
};
uint32_t i;
g_io_range_idx = 0;
/* Consider strip counts from 1 up to the number that spans all base bdevs,
 * and also several multiples of that span.
 */
for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
raid_bdev_io_generate_by_strips(n_strips);
}
for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
n_strips = n_strips_times[i];
raid_bdev_io_generate_by_strips(n_strips);
}
}
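/* A sizing note grounded in the constants above: each call to
 * raid_bdev_io_generate_by_strips() appends at most 3 * 3 * 3 = 27 ranges,
 * and raid_bdev_io_generate() makes (g_max_base_drives - 1) + 5 such calls,
 * so g_io_ranges stays within MAX_TEST_IO_RANGE = 3 * 3 * 3 * (MAX_BASE_DRIVES + 5).
 */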
static void
test_unmap_io(void)
{
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
struct spdk_bdev_io *bdev_io;
uint32_t count;
uint64_t io_len;
uint64_t lba;
set_globals();
create_test_req(&req, "raid1", 0, true);
rpc_req = &req;
rpc_req_size = sizeof(req);
CU_ASSERT(raid_bdev_init() == 0);
verify_raid_config_present(req.name, false);
verify_raid_bdev_present(req.name, false);
g_rpc_err = 0;
g_json_decode_obj_construct = 1;
spdk_rpc_construct_raid_bdev(NULL, NULL);
CU_ASSERT(g_rpc_err == 0);
verify_raid_config(&req, true);
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
CU_ASSERT(pbdev != NULL);
ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch != NULL);
ch_ctx = spdk_io_channel_get_ctx(ch);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
}
CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
raid_bdev_io_generate();
for (count = 0; count < g_io_range_idx; count++) {
bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
io_len = g_io_ranges[count].nblocks;
lba = g_io_ranges[count].lba;
bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
g_io_output_index = 0;
raid_bdev_submit_request(ch, bdev_io);
verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
g_child_io_status_flag);
bdev_io_cleanup(bdev_io);
free(bdev_io);
}
free_test_req(&req);
raid_bdev_destroy_cb(pbdev, ch_ctx);
CU_ASSERT(ch_ctx->base_channel == NULL);
free(ch);
destroy_req.name = strdup("raid1");
rpc_req = &destroy_req;
rpc_req_size = sizeof(destroy_req);
g_rpc_err = 0;
g_json_decode_obj_construct = 0;
spdk_rpc_destroy_raid_bdev(NULL, NULL);
CU_ASSERT(g_rpc_err == 0);
verify_raid_config_present("raid1", false);
verify_raid_bdev_present("raid1", false);
raid_bdev_exit();
base_bdevs_cleanup();
reset_globals();
}
/* Test IO failures */
static void
test_io_failure(void)
@@ -2329,6 +2639,7 @@ int main(int argc, char **argv)
CU_add_test(suite, "test_reset_io", test_reset_io) == NULL ||
CU_add_test(suite, "test_write_io", test_write_io) == NULL ||
CU_add_test(suite, "test_read_io", test_read_io) == NULL ||
CU_add_test(suite, "test_unmap_io", test_unmap_io) == NULL ||
CU_add_test(suite, "test_io_failure", test_io_failure) == NULL ||
CU_add_test(suite, "test_io_waitq", test_io_waitq) == NULL ||
CU_add_test(suite, "test_multi_raid_no_io", test_multi_raid_no_io) == NULL ||