module/raid: suspend/resume IO

Add functions to suspend and resume IO on all channels. This will be
used to safely change the device state in case of e.g. removing a base
bdev.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I203c1899bde15101e0c2bc8da7a1066a2fee6dd2
This commit is contained in:
Artur Paszkiewicz 2022-09-20 14:16:16 +02:00 committed by David Ko
parent 8c591e2d4f
commit 79eccac059
3 changed files with 496 additions and 7 deletions

View File

@ -51,6 +51,8 @@ static int raid_bdev_init(void);
static void raid_bdev_deconfigure(struct raid_bdev *raid_bdev,
raid_bdev_destruct_cb cb_fn, void *cb_arg);
static void raid_bdev_channel_on_suspended(struct raid_bdev_io_channel *raid_ch);
/*
* brief:
* raid_bdev_create_cb function is a cb function for raid bdev which creates the
@ -76,6 +78,7 @@ raid_bdev_create_cb(void *io_device, void *ctx_buf)
assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
raid_ch->num_channels = raid_bdev->num_base_bdevs;
TAILQ_INIT(&raid_ch->suspended_ios);
raid_ch->base_channel = calloc(raid_ch->num_channels,
sizeof(struct spdk_io_channel *));
@ -83,6 +86,11 @@ raid_bdev_create_cb(void *io_device, void *ctx_buf)
SPDK_ERRLOG("Unable to allocate base bdevs io channel\n");
return -ENOMEM;
}
pthread_mutex_lock(&raid_bdev->mutex);
raid_ch->is_suspended = (raid_bdev->suspend_cnt > 0);
pthread_mutex_unlock(&raid_bdev->mutex);
for (i = 0; i < raid_ch->num_channels; i++) {
/*
* Get the spdk_io_channel for all the base bdevs. This is used during
@ -138,6 +146,7 @@ raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
assert(raid_ch != NULL);
assert(raid_ch->base_channel);
assert(TAILQ_EMPTY(&raid_ch->suspended_ios));
if (raid_ch->module_channel) {
spdk_put_io_channel(raid_ch->module_channel);
@ -184,6 +193,7 @@ raid_bdev_cleanup(struct raid_bdev *raid_bdev)
static void
raid_bdev_free(struct raid_bdev *raid_bdev)
{
pthread_mutex_destroy(&raid_bdev->mutex);
free(raid_bdev->bdev.name);
free(raid_bdev);
}
@ -295,8 +305,14 @@ void
/*
 * brief:
 * Completes a raid IO back to the bdev layer. Also decrements the channel's
 * in-flight IO count and, if the channel is currently being suspended,
 * reports the channel quiesced once its last outstanding IO finishes.
 * params:
 * raid_io - pointer to the raid IO being completed
 * status - completion status to report to the bdev layer
 * returns:
 * none
 */
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;

	spdk_bdev_io_complete(bdev_io, status);

	raid_ch->num_ios--;
	/* Last in-flight IO on a suspending channel - signal quiescence. */
	if (raid_ch->is_suspended && raid_ch->num_ios == 0) {
		raid_bdev_channel_on_suspended(raid_ch);
	}
}
/*
@ -464,9 +480,17 @@ static void
raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);
if (raid_ch->is_suspended) {
TAILQ_INSERT_TAIL(&raid_ch->suspended_ios, raid_io, link);
return;
} else {
raid_ch->num_ios++;
}
raid_io->raid_bdev = bdev_io->bdev->ctxt;
raid_io->raid_ch = spdk_io_channel_get_ctx(ch);
raid_io->raid_ch = raid_ch;
raid_io->base_bdev_io_remaining = 0;
raid_io->base_bdev_io_submitted = 0;
raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
@ -928,6 +952,7 @@ raid_bdev_create(const char *name, uint32_t strip_size, uint8_t num_base_bdevs,
struct spdk_bdev *raid_bdev_gen;
struct raid_bdev_module *module;
uint8_t min_operational;
int rc;
if (raid_bdev_find_by_name(name) != NULL) {
SPDK_ERRLOG("Duplicate raid bdev name found: %s\n", name);
@ -1011,12 +1036,21 @@ raid_bdev_create(const char *name, uint32_t strip_size, uint8_t num_base_bdevs,
raid_bdev->level = level;
raid_bdev->min_base_bdevs_operational = min_operational;
raid_bdev->superblock_enabled = superblock;
TAILQ_INIT(&raid_bdev->suspend_ctx);
rc = pthread_mutex_init(&raid_bdev->mutex, NULL);
if (rc) {
SPDK_ERRLOG("Cannot init mutex for raid bdev\n");
free(raid_bdev->base_bdev_info);
free(raid_bdev);
return rc;
}
raid_bdev_gen = &raid_bdev->bdev;
raid_bdev_gen->name = strdup(name);
if (!raid_bdev_gen->name) {
SPDK_ERRLOG("Unable to allocate name for raid\n");
pthread_mutex_destroy(&raid_bdev->mutex);
free(raid_bdev->base_bdev_info);
free(raid_bdev);
return -ENOMEM;
@ -1230,6 +1264,157 @@ raid_bdev_find_by_base_bdev(struct spdk_bdev *base_bdev, struct raid_bdev **_rai
return false;
}
typedef void (*raid_bdev_suspended_cb)(struct raid_bdev *raid_bdev, void *ctx);
struct raid_bdev_suspend_ctx {
raid_bdev_suspended_cb suspended_cb;
void *suspended_cb_ctx;
TAILQ_ENTRY(raid_bdev_suspend_ctx) link;
};
/*
 * brief:
 * Called on the app thread once every IO channel has quiesced. Drains the
 * list of pending suspend requests, invoking and freeing each one.
 * params:
 * raid_bdev - pointer to the now fully suspended raid bdev
 * returns:
 * none
 */
static void
raid_bdev_on_suspended(struct raid_bdev *raid_bdev)
{
	struct raid_bdev_suspend_ctx *sctx;

	for (sctx = TAILQ_FIRST(&raid_bdev->suspend_ctx); sctx != NULL;
	     sctx = TAILQ_FIRST(&raid_bdev->suspend_ctx)) {
		TAILQ_REMOVE(&raid_bdev->suspend_ctx, sctx, link);
		sctx->suspended_cb(raid_bdev, sctx->suspended_cb_ctx);
		free(sctx);
	}
}
/*
 * brief:
 * Message handler run on the app thread: records one more IO channel that
 * must quiesce before the suspend can complete.
 * params:
 * _raid_bdev - raid bdev pointer passed as void * message context
 * returns:
 * none
 */
static void
raid_bdev_inc_suspend_num_channels(void *_raid_bdev)
{
	struct raid_bdev *raid_bdev = _raid_bdev;

	raid_bdev->suspend_num_channels += 1;
}
/*
 * brief:
 * Message handler run on the app thread: one IO channel has quiesced. When
 * the count drops to zero the raid bdev is fully suspended and the pending
 * suspend callbacks are fired.
 * params:
 * _raid_bdev - raid bdev pointer passed as void * message context
 * returns:
 * none
 */
static void
raid_bdev_dec_suspend_num_channels(void *_raid_bdev)
{
	struct raid_bdev *raid_bdev = _raid_bdev;

	raid_bdev->suspend_num_channels -= 1;
	if (raid_bdev->suspend_num_channels == 0) {
		raid_bdev_on_suspended(raid_bdev);
	}
}
/*
 * brief:
 * Reports that this IO channel has no more IOs in flight. The per-bdev
 * suspend bookkeeping lives on the app thread, so forward the decrement
 * there as a message.
 * params:
 * raid_ch - pointer to the quiesced raid IO channel
 * returns:
 * none
 */
static void
raid_bdev_channel_on_suspended(struct raid_bdev_io_channel *raid_ch)
{
	struct raid_bdev *raid_bdev;

	raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));
	spdk_thread_exec_msg(spdk_thread_get_app_thread(), raid_bdev_dec_suspend_num_channels, raid_bdev);
}
/*
 * brief:
 * Per-channel step of raid_bdev_suspend(), run on each channel's thread via
 * spdk_for_each_channel(). Marks the channel suspended so new IOs get queued
 * and, if the channel is already idle, immediately reports it quiesced.
 * params:
 * i - channel iterator; its ctx is the raid bdev
 * returns:
 * none
 */
static void
raid_bdev_channel_suspend(struct spdk_io_channel_iter *i)
{
	struct raid_bdev *raid_bdev = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);

	SPDK_DEBUGLOG(bdev_raid, "raid_ch: %p\n", raid_ch);

	/* Register this channel with the app-thread counter before it can
	 * possibly report itself quiesced below, so the increment is always
	 * processed before the matching decrement. */
	spdk_thread_exec_msg(spdk_thread_get_app_thread(), raid_bdev_inc_suspend_num_channels, raid_bdev);

	/* From this point raid_bdev_submit_request() queues new IOs instead of
	 * submitting them. */
	raid_ch->is_suspended = true;
	if (raid_ch->num_ios == 0) {
		raid_bdev_channel_on_suspended(raid_ch);
	}

	spdk_for_each_channel_continue(i, 0);
}
/*
 * brief:
 * Completion callback of the suspend channel iteration. Drops the extra
 * channel count taken in raid_bdev_suspend() so a bdev with no IO channels
 * still reaches the suspended state. Runs on the app thread.
 * params:
 * i - channel iterator; its ctx is the raid bdev
 * status - iteration status (unused)
 * returns:
 * none
 */
static void
raid_bdev_suspend_continue(struct spdk_io_channel_iter *i, int status)
{
	raid_bdev_dec_suspend_num_channels(spdk_io_channel_iter_get_ctx(i));
}
static int raid_bdev_suspend(struct raid_bdev *raid_bdev, raid_bdev_suspended_cb cb,
			     void *cb_ctx) __attribute__((unused));

/*
 * brief:
 * Suspend IO on all of the raid bdev's IO channels. New IOs are queued until
 * a matching raid_bdev_resume(). May be called multiple times; the bdev stays
 * suspended until every caller has resumed. Must be called on the app thread.
 * params:
 * raid_bdev - pointer to raid bdev
 * cb - optional callback invoked (with cb_ctx) once all channels have quiesced
 * cb_ctx - context for cb
 * returns:
 * 0 on success, -ENOMEM if the callback context cannot be allocated (in which
 * case the suspend does not take effect and no resume is required)
 */
static int
raid_bdev_suspend(struct raid_bdev *raid_bdev, raid_bdev_suspended_cb cb, void *cb_ctx)
{
	struct raid_bdev_suspend_ctx *ctx = NULL;

	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	/* Allocate the callback context before bumping suspend_cnt so that a
	 * failure here cannot leave the counter incremented with no matching
	 * resume (which would leave all future channels suspended forever). */
	if (cb != NULL) {
		ctx = malloc(sizeof(*ctx));
		if (ctx == NULL) {
			return -ENOMEM;
		}
		ctx->suspended_cb = cb;
		ctx->suspended_cb_ctx = cb_ctx;
	}

	pthread_mutex_lock(&raid_bdev->mutex);
	raid_bdev->suspend_cnt++;
	pthread_mutex_unlock(&raid_bdev->mutex);

	if (raid_bdev->suspend_cnt > 1 && raid_bdev->suspend_num_channels == 0) {
		/* Already fully suspended by a previous caller - complete synchronously. */
		if (cb != NULL) {
			cb(raid_bdev, cb_ctx);
			free(ctx);
		}
		return 0;
	}

	if (ctx != NULL) {
		TAILQ_INSERT_TAIL(&raid_bdev->suspend_ctx, ctx, link);
	}

	/* decremented in raid_bdev_suspend_continue() - in case there are no IO channels */
	raid_bdev_inc_suspend_num_channels(raid_bdev);

	spdk_for_each_channel(raid_bdev, raid_bdev_channel_suspend, raid_bdev,
			      raid_bdev_suspend_continue);

	return 0;
}
/*
 * brief:
 * Per-channel step of raid_bdev_resume(), run on each channel's thread via
 * spdk_for_each_channel(). Clears the channel's suspended flag and resubmits
 * every IO that was queued while the channel was suspended.
 * params:
 * i - channel iterator
 * returns:
 * none
 */
static void
raid_bdev_channel_resume(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);
	struct raid_bdev_io *raid_io;

	SPDK_DEBUGLOG(bdev_raid, "raid_ch: %p\n", raid_ch);

	/* Clear the flag first so the IOs resubmitted below are actually
	 * processed instead of being queued again. */
	raid_ch->is_suspended = false;

	while ((raid_io = TAILQ_FIRST(&raid_ch->suspended_ios))) {
		TAILQ_REMOVE(&raid_ch->suspended_ios, raid_io, link);
		raid_bdev_submit_request(spdk_io_channel_from_ctx(raid_ch),
					 spdk_bdev_io_from_ctx(raid_io));
	}

	spdk_for_each_channel_continue(i, 0);
}
static void raid_bdev_resume(struct raid_bdev *raid_bdev) __attribute__((unused));

/*
 * brief:
 * Undo one raid_bdev_suspend() call. When the last outstanding suspend is
 * dropped, every IO channel is unquiesced and its queued IOs are resubmitted.
 * Must be called on the app thread.
 * params:
 * raid_bdev - pointer to raid bdev
 * returns:
 * none
 */
static void
raid_bdev_resume(struct raid_bdev *raid_bdev)
{
	assert(spdk_get_thread() == spdk_thread_get_app_thread());
	assert(raid_bdev->suspend_cnt > 0);

	pthread_mutex_lock(&raid_bdev->mutex);
	raid_bdev->suspend_cnt--;
	pthread_mutex_unlock(&raid_bdev->mutex);

	/* Other callers still hold the bdev suspended - nothing more to do. */
	if (raid_bdev->suspend_cnt != 0) {
		return;
	}

	spdk_for_each_channel(raid_bdev, raid_bdev_channel_resume, raid_bdev, NULL);
}
/*
* brief:
* raid_bdev_remove_base_bdev function is called by below layers when base_bdev

View File

@ -96,6 +96,8 @@ struct raid_bdev_io {
/* Private data for the raid module */
void *module_private;
TAILQ_ENTRY(raid_bdev_io) link;
};
/*
@ -151,6 +153,18 @@ struct raid_bdev {
/* Private data for the raid module */
void *module_private;
/* Counter of callers of raid_bdev_suspend() */
uint32_t suspend_cnt;
/* Number of channels remaining to suspend */
uint32_t suspend_num_channels;
/* List of suspend contexts */
TAILQ_HEAD(, raid_bdev_suspend_ctx) suspend_ctx;
/* Device mutex */
pthread_mutex_t mutex;
};
#define RAID_FOR_EACH_BASE_BDEV(r, i) \
@ -169,6 +183,15 @@ struct raid_bdev_io_channel {
/* Private raid module IO channel */
struct spdk_io_channel *module_channel;
/* Number of raid IOs on this channel */
uint32_t num_ios;
/* Is the channel currently suspended */
bool is_suspended;
/* List of suspended IOs */
TAILQ_HEAD(, raid_bdev_io) suspended_ios;
};
/* TAIL head for raid bdev list */

View File

@ -73,6 +73,9 @@ struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
struct spdk_io_channel g_io_channel;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);
struct spdk_io_channel *g_per_thread_base_bdev_channels;
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
@ -126,9 +129,17 @@ DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint
/*
 * Stub for spdk_bdev_get_io_channel() used by the unit tests. Returns a
 * per-thread base bdev channel when g_per_thread_base_bdev_channels is set,
 * otherwise the single shared g_io_channel.
 *
 * Note: the rendered diff had lost its +/- markers, leaving the pre-change
 * lines (`g_io_channel.thread = ...; return &g_io_channel;`) merged in before
 * the new logic, which made the per-thread path unreachable. This is the
 * post-change version with those stale lines removed.
 */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	struct spdk_io_channel *ch;

	if (g_per_thread_base_bdev_channels) {
		ch = &g_per_thread_base_bdev_channels[g_ut_thread_id];
	} else {
		ch = &g_io_channel;
	}

	ch->thread = spdk_get_thread();

	return ch;
}
static void
@ -181,6 +192,8 @@ set_globals(void)
g_json_decode_obj_err = 0;
g_json_decode_obj_create = 0;
g_lba_offset = 0;
g_bdev_io_defer_completion = false;
g_per_thread_base_bdev_channels = NULL;
}
static void
@ -196,6 +209,8 @@ base_bdevs_cleanup(void)
free(bdev);
}
}
free(g_per_thread_base_bdev_channels);
}
static void
@ -225,6 +240,7 @@ reset_globals(void)
}
g_rpc_req = NULL;
g_rpc_req_size = 0;
g_per_thread_base_bdev_channels = NULL;
}
void
@ -257,6 +273,29 @@ set_io_output(struct io_output *output,
output->iotype = iotype;
}
/*
 * brief:
 * Completes (or defers completion of) a child IO created by the stubbed base
 * bdev submit functions. With g_bdev_io_defer_completion set, the completion
 * is stashed so the test can fire it later via complete_deferred_ios().
 * params:
 * child_io - the child bdev IO
 * cb - completion callback
 * cb_arg - context for cb
 * returns:
 * none
 */
static void
child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (!g_bdev_io_defer_completion) {
		cb(child_io, g_child_io_status_flag, cb_arg);
		return;
	}

	child_io->internal.cb = cb;
	child_io->internal.caller_ctx = cb_arg;
	TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link);
}
/*
 * brief:
 * Fires every child IO completion previously deferred by child_io_complete(),
 * in FIFO order.
 * returns:
 * none
 */
static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *io;

	while (!TAILQ_EMPTY(&g_deferred_ios)) {
		io = TAILQ_FIRST(&g_deferred_ios);
		TAILQ_REMOVE(&g_deferred_ios, io, internal.link);
		io->internal.cb(io, g_child_io_status_flag, io->internal.caller_ctx);
	}
}
/* It will cache the split IOs for verification */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
@ -283,7 +322,7 @@ spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
child_io = calloc(1, sizeof(struct spdk_bdev_io));
SPDK_CU_ASSERT_FATAL(child_io != NULL);
cb(child_io, g_child_io_status_flag, cb_arg);
child_io_complete(child_io, cb, cb_arg);
}
return g_bdev_io_submit_status;
@ -325,7 +364,7 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
child_io = calloc(1, sizeof(struct spdk_bdev_io));
SPDK_CU_ASSERT_FATAL(child_io != NULL);
cb(child_io, g_child_io_status_flag, cb_arg);
child_io_complete(child_io, cb, cb_arg);
}
return g_bdev_io_submit_status;
@ -350,7 +389,7 @@ spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
child_io = calloc(1, sizeof(struct spdk_bdev_io));
SPDK_CU_ASSERT_FATAL(child_io != NULL);
cb(child_io, g_child_io_status_flag, cb_arg);
child_io_complete(child_io, cb, cb_arg);
}
return g_bdev_io_submit_status;
@ -487,7 +526,7 @@ spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
child_io = calloc(1, sizeof(struct spdk_bdev_io));
SPDK_CU_ASSERT_FATAL(child_io != NULL);
cb(child_io, g_child_io_status_flag, cb_arg);
child_io_complete(child_io, cb, cb_arg);
}
return g_bdev_io_submit_status;
@ -1991,6 +2030,246 @@ test_create_raid_superblock(void)
}
/*
 * brief:
 * Suspend completion callback used by the tests - records, through ctx,
 * that the suspend finished.
 * params:
 * raid_bdev - the suspended raid bdev (unused)
 * ctx - pointer to a bool flag to set
 * returns:
 * none
 */
static void
suspend_cb(struct raid_bdev *raid_bdev, void *ctx)
{
	bool *called = ctx;

	*called = true;
}
/*
 * Tests raid_bdev_suspend()/raid_bdev_resume() with zero and one IO channel:
 * - suspend/resume with no channels completes immediately
 * - suspend with one idle channel marks the channel suspended
 * - nested suspends keep the channel suspended until every caller resumes
 * - an in-flight IO defers suspend completion until it finishes, and an IO
 *   submitted while suspended is queued and serviced on resume
 */
static void
test_raid_suspend_resume(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *raid_ch;
	struct spdk_bdev_io *bdev_io;
	bool suspend_cb_called, suspend_cb_called2;
	int rc;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);
	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	/* suspend/resume with no io channels */
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	raid_bdev_resume(pbdev);
	poll_threads();

	/* suspend/resume with one idle io channel */
	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	raid_ch = spdk_io_channel_get_ctx(ch);
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	CU_ASSERT(raid_ch->is_suspended == true);
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == false);

	/* suspend/resume multiple */
	suspend_cb_called = false;
	suspend_cb_called2 = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called2);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	CU_ASSERT(suspend_cb_called2 == true);
	CU_ASSERT(raid_ch->is_suspended == true);
	/* suspending an already fully suspended bdev completes synchronously -
	 * no poll_threads() needed before checking the flag */
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(suspend_cb_called == true);
	/* three suspends are outstanding - the channel stays suspended until
	 * the third resume */
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == true);
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == true);
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == false);

	/* suspend/resume with io before and after suspend */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_READ);
	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;
	g_bdev_io_defer_completion = true;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_ch->num_ios == 1);
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == true);
	/* the suspend must not complete while an IO is still in flight */
	CU_ASSERT(suspend_cb_called == false);
	complete_deferred_ios();
	verify_io(bdev_io, req.base_bdevs.num_base_bdevs, raid_ch, pbdev, g_child_io_status_flag);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	g_io_output_index = 0;
	raid_bdev_submit_request(ch, bdev_io);
	/* an IO submitted while suspended is queued, not executed */
	CU_ASSERT(raid_ch->num_ios == 0);
	CU_ASSERT(TAILQ_FIRST(&raid_ch->suspended_ios) == (struct raid_bdev_io *)bdev_io->driver_ctx);
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch->is_suspended == false);
	/* the queued IO was resubmitted and completed on resume */
	verify_io(bdev_io, req.base_bdevs.num_base_bdevs, raid_ch, pbdev, g_child_io_status_flag);

	bdev_io_cleanup(bdev_io);
	spdk_put_io_channel(ch);
	free_test_req(&req);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
/*
 * Tests that IO channels created while a suspend or a resume is in progress
 * end up in the correct state. Uses three UT threads with per-thread base
 * bdev channels so each raid IO channel is independent.
 */
static void
test_raid_suspend_resume_create_ch(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch1, *ch2;
	struct raid_bdev_io_channel *raid_ch1, *raid_ch2;
	bool suspend_cb_called;
	int rc;

	free_threads();
	allocate_threads(3);
	set_thread(0);

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);
	g_per_thread_base_bdev_channels = calloc(3, sizeof(struct spdk_io_channel));
	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	set_thread(1);
	ch1 = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	raid_ch1 = spdk_io_channel_get_ctx(ch1);

	/* create a new io channel during suspend */
	set_thread(0);
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	/* only thread 1 is polled, so the suspend has not completed yet */
	poll_thread(1);
	CU_ASSERT(raid_ch1->is_suspended == true);
	CU_ASSERT(suspend_cb_called == false);
	set_thread(2);
	ch2 = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	raid_ch2 = spdk_io_channel_get_ctx(ch2);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	CU_ASSERT(raid_ch1->is_suspended == true);
	/* a channel created while suspend_cnt > 0 starts out suspended */
	CU_ASSERT(raid_ch2->is_suspended == true);
	set_thread(0);
	raid_bdev_resume(pbdev);
	poll_threads();
	CU_ASSERT(raid_ch1->is_suspended == false);
	CU_ASSERT(raid_ch2->is_suspended == false);
	set_thread(2);
	spdk_put_io_channel(ch2);
	poll_threads();

	/* create a new io channel during resume */
	set_thread(0);
	suspend_cb_called = false;
	rc = raid_bdev_suspend(pbdev, suspend_cb, &suspend_cb_called);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	poll_threads();
	CU_ASSERT(suspend_cb_called == true);
	CU_ASSERT(raid_ch1->is_suspended == true);
	raid_bdev_resume(pbdev);
	set_thread(2);
	ch2 = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	raid_ch2 = spdk_io_channel_get_ctx(ch2);
	/* the resume has not reached channel 1 yet, but the new channel
	 * already observes suspend_cnt == 0 and starts unsuspended */
	CU_ASSERT(raid_ch1->is_suspended == true);
	CU_ASSERT(raid_ch2->is_suspended == false);
	poll_threads();
	CU_ASSERT(raid_ch1->is_suspended == false);
	CU_ASSERT(raid_ch2->is_suspended == false);
	set_thread(2);
	spdk_put_io_channel(ch2);
	poll_threads();
	set_thread(1);
	spdk_put_io_channel(ch1);
	poll_threads();

	set_thread(0);
	free_test_req(&req);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();

	free_threads();
	allocate_threads(1);
	set_thread(0);
}
int
main(int argc, char **argv)
{
@ -2019,6 +2298,8 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, test_raid_json_dump_info);
CU_ADD_TEST(suite, test_context_size);
CU_ADD_TEST(suite, test_raid_level_conversions);
CU_ADD_TEST(suite, test_raid_suspend_resume);
CU_ADD_TEST(suite, test_raid_suspend_resume_create_ch);
allocate_threads(1);
set_thread(0);