bdev/raid: Get struct raid_bdev from struct spdk_bdev_io in IO submission
During IO submission, the raid_bdev can be obtained via bdev_io->bdev->ctxt,
so keeping a raid_bdev pointer in raid_bdev_io_channel is redundant. Remove it
and look the raid_bdev up from the spdk_bdev_io instead.

Change-Id: I722432718aca8c5846541816b6ecca56821d77f6
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/421182
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Kunal Sablok <kunal.sablok@intel.com>
Reviewed-by: GangCao <gang.cao@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 056980c10c
commit 57dd0d5085
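For readers unfamiliar with the pattern the commit message relies on: the raid
module stores a back-pointer to its raid_bdev in the ctxt field of the
spdk_bdev it registers, and every spdk_bdev_io carries a pointer to the bdev it
was submitted to, so the raid_bdev is always reachable from the IO itself. A
minimal sketch of that relationship follows; it uses a trimmed-down stand-in
struct rather than the real struct raid_bdev and assumes the public
spdk/bdev_module.h header that declares struct spdk_bdev, struct spdk_bdev_io
and spdk_bdev_register():

#include "spdk/bdev_module.h"

/* Stand-in for struct raid_bdev; only the fields needed for the sketch. */
struct example_raid_bdev {
	struct spdk_bdev	bdev;		/* bdev exposed to upper layers */
	uint8_t			num_base_bdevs;
};

static int
example_register(struct example_raid_bdev *raid_bdev)
{
	/* Back-pointer consulted via bdev_io->bdev->ctxt at submit time.
	 * Other required fields (name, fn_table, blockcnt, ...) omitted. */
	raid_bdev->bdev.ctxt = raid_bdev;
	return spdk_bdev_register(&raid_bdev->bdev);
}

static struct example_raid_bdev *
example_raid_bdev_from_io(struct spdk_bdev_io *bdev_io)
{
	/* bdev_io->bdev is the spdk_bdev the IO was submitted against, so
	 * its ctxt field yields the owning raid bdev without caching a
	 * pointer in the per-channel context. */
	return (struct example_raid_bdev *)bdev_io->bdev->ctxt;
}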
@@ -89,13 +89,6 @@ raid_bdev_create_cb(void *io_device, void *ctx_buf)
 	assert(raid_bdev != NULL);
 	assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
 
-	/*
-	 * Store raid_bdev in each channel which is used to get the read only
-	 * raid bdev specific information during io split logic like base bdev
-	 * descriptors, strip size etc
-	 */
-	ch->raid_bdev = raid_bdev;
-
 	ch->base_bdevs_io_channel = calloc(raid_bdev->num_base_bdevs,
 					   sizeof(struct spdk_io_channel *));
 	if (!ch->base_bdevs_io_channel) {
@@ -150,7 +143,6 @@ raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
 		spdk_put_io_channel(ch->base_bdevs_io_channel[i]);
 		ch->base_bdevs_io_channel[i] = NULL;
 	}
-	ch->raid_bdev = NULL;
 	free(ch->base_bdevs_io_channel);
 	ch->base_bdevs_io_channel = NULL;
 }
@@ -298,7 +290,7 @@ raid_bdev_send_passthru(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io
 	int ret;
 
 	raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
-	raid_bdev = raid_bdev_io_channel->raid_bdev;
+	raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
 	raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
 	raid_bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
 
@@ -365,7 +357,7 @@ raid_bdev_submit_children(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_
 {
 	struct raid_bdev_io_channel *raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
 	struct raid_bdev_io *raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
-	struct raid_bdev *raid_bdev = raid_bdev_io_channel->raid_bdev;
+	struct raid_bdev *raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
 	uint64_t pd_strip;
 	uint32_t offset_in_strip;
 	uint64_t pd_lba;
@@ -549,7 +541,6 @@ raid_bdev_waitq_io_process(void *ctx)
 {
 	struct raid_bdev_io *raid_bdev_io = ctx;
 	struct spdk_bdev_io *bdev_io;
-	struct raid_bdev_io_channel *raid_bdev_io_channel;
 	struct raid_bdev *raid_bdev;
 	int ret;
 	uint64_t start_strip;
@@ -561,8 +552,7 @@ raid_bdev_waitq_io_process(void *ctx)
 	 * Try to submit childs of parent bdev io. If failed due to resource
 	 * crunch then break the loop and don't try to process other queued IOs.
 	 */
-	raid_bdev_io_channel = spdk_io_channel_get_ctx(raid_bdev_io->ch);
-	raid_bdev = raid_bdev_io_channel->raid_bdev;
+	raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
 	if (raid_bdev->num_base_bdevs > 1) {
 		start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
 		end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
@@ -591,7 +581,6 @@ raid_bdev_waitq_io_process(void *ctx)
 static void
 _raid_bdev_submit_rw_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-	struct raid_bdev_io_channel *raid_bdev_io_channel;
 	struct raid_bdev_io *raid_bdev_io;
 	struct raid_bdev *raid_bdev;
 	uint64_t start_strip = 0;
@@ -607,8 +596,7 @@ _raid_bdev_submit_rw_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bd
 	/*
 	 * IO parameters used during io split and io completion
 	 */
-	raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
-	raid_bdev = raid_bdev_io_channel->raid_bdev;
+	raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
 	raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
 	if (raid_bdev->num_base_bdevs > 1) {
 		start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;

@@ -200,9 +200,6 @@ struct raid_config {
 struct raid_bdev_io_channel {
 	/* Array of IO channels of base bdevs */
 	struct spdk_io_channel **base_bdevs_io_channel;
-
-	/* raid bdev context pointer */
-	struct raid_bdev *raid_bdev;
 };
 
 /* TAIL heads for various raid bdev lists */

@@ -655,8 +655,10 @@ bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
 }
 
 static void
-bdev_io_initialize(struct spdk_bdev_io *bdev_io, uint64_t lba, uint64_t blocks, int16_t iotype)
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+		   uint64_t lba, uint64_t blocks, int16_t iotype)
 {
+	bdev_io->bdev = bdev;
 	bdev_io->u.bdev.offset_blocks = lba;
 	bdev_io->u.bdev.num_blocks = blocks;
 	bdev_io->type = iotype;
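Why the test helper gains a bdev argument: the submit paths now dereference
bdev_io->bdev->ctxt, so every IO the unit tests fabricate must point at the
raid bdev under test or the new lookup would hit a NULL bdev pointer. A sketch
of the resulting call pattern, lifted from the test hunks below (the
surrounding loop and setup are elided):

	/* Fabricate an IO with room for the module's per-IO context and aim
	 * it at the raid bdev under test so bdev_io->bdev->ctxt resolves. */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
	raid_bdev_submit_request(ch, bdev_io);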
@@ -1296,12 +1298,10 @@ test_io_channel(void)
 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 
 	CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-	CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
 	}
 	raid_bdev_destroy_cb(pbdev, ch_ctx);
-	CU_ASSERT(ch_ctx->raid_bdev == NULL);
 	CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 	free_test_req(&req);
 
@@ -1360,7 +1360,6 @@ test_write_io(void)
 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 
 	CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-	CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
 	}
@@ -1370,7 +1369,7 @@ test_write_io(void)
 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		io_len = (rand() % g_max_io_size) + 1;
-		bdev_io_initialize(bdev_io, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
 		lba += io_len;
 		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 		g_io_output_index = 0;
@@ -1383,7 +1382,6 @@ test_write_io(void)
 	free_test_req(&req);
 
 	raid_bdev_destroy_cb(pbdev, ch_ctx);
-	CU_ASSERT(ch_ctx->raid_bdev == NULL);
 	CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 	free(ch);
 	destroy_req.name = strdup("raid1");
@@ -1440,7 +1438,6 @@ test_read_io(void)
 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 
 	CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-	CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
 	}
@@ -1451,7 +1448,7 @@ test_read_io(void)
 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		io_len = (rand() % g_max_io_size) + 1;
-		bdev_io_initialize(bdev_io, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
 		lba += io_len;
 		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 		g_io_output_index = 0;
@@ -1463,7 +1460,6 @@ test_read_io(void)
 	}
 
 	raid_bdev_destroy_cb(pbdev, ch_ctx);
-	CU_ASSERT(ch_ctx->raid_bdev == NULL);
 	CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 	free(ch);
 	destroy_req.name = strdup("raid1");
@@ -1521,7 +1517,6 @@ test_io_failure(void)
 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 
 	CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-	CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
 	}
@@ -1532,7 +1527,7 @@ test_io_failure(void)
 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		io_len = (rand() % g_max_io_size) + 1;
-		bdev_io_initialize(bdev_io, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
 		lba += io_len;
 		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 		g_io_output_index = 0;
@@ -1550,7 +1545,7 @@ test_io_failure(void)
 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		io_len = (rand() % g_max_io_size) + 1;
-		bdev_io_initialize(bdev_io, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
 		lba += io_len;
 		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 		g_io_output_index = 0;
@@ -1562,7 +1557,6 @@ test_io_failure(void)
 	}
 
 	raid_bdev_destroy_cb(pbdev, ch_ctx);
-	CU_ASSERT(ch_ctx->raid_bdev == NULL);
 	CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 	free(ch);
 	destroy_req.name = strdup("raid1");
@@ -1622,7 +1616,6 @@ test_io_waitq(void)
 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 
 	CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-	CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 	SPDK_CU_ASSERT_FATAL(ch_ctx->base_bdevs_io_channel != NULL);
 	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
@@ -1636,7 +1629,7 @@ test_io_waitq(void)
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		TAILQ_INSERT_TAIL(&head_io, bdev_io, module_link);
 		io_len = (rand() % g_max_io_size) + 1;
-		bdev_io_initialize(bdev_io, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
 		g_bdev_io_submit_status = -ENOMEM;
 		lba += io_len;
 		raid_bdev_submit_request(ch, bdev_io);
@@ -1656,7 +1649,6 @@ test_io_waitq(void)
 	}
 
 	raid_bdev_destroy_cb(pbdev, ch_ctx);
-	CU_ASSERT(ch_ctx->raid_bdev == NULL);
 	CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 	g_ignore_io_output = 0;
 	free(ch);
@@ -1860,7 +1852,6 @@ test_multi_raid_with_io(void)
 		ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 		CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
-		CU_ASSERT(ch_ctx->raid_bdev == pbdev);
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel != NULL);
 		for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
 			CU_ASSERT(ch_ctx->base_bdevs_io_channel[j] == (void *)0x1);
@@ -1873,8 +1864,6 @@ test_multi_raid_with_io(void)
 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
 		io_len = (rand() % g_max_io_size) + 1;
 		iotype = (rand() % 2) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
-		bdev_io_initialize(bdev_io, lba, io_len, iotype);
-		lba += io_len;
 		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 		g_io_output_index = 0;
 		raid_random = rand() % g_max_raids;
@@ -1885,6 +1874,8 @@ test_multi_raid_with_io(void)
 				break;
 			}
 		}
+		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, iotype);
+		lba += io_len;
 		CU_ASSERT(pbdev != NULL);
 		raid_bdev_submit_request(ch_random, bdev_io);
 		verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
@@ -1903,7 +1894,6 @@ test_multi_raid_with_io(void)
 		ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
 		raid_bdev_destroy_cb(pbdev, ch_ctx);
-		CU_ASSERT(ch_ctx->raid_bdev == NULL);
 		CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
 		destroy_req.name = strdup(construct_req[i].name);
 		count = snprintf(name, 16, "%s", destroy_req.name);