bdev/raid: Embed struct raid_bdev_ctxt into struct raid_bdev

Locating the spdk_bdev structure at the beginning of the raid_bdev structure
simplifies the hierarchy and matches the other bdev modules; a sketch of the
old and new layouts follows the commit metadata below.

Change-Id: I1bfbf773bc96a4f144e6bff772ade05bb42762e9
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/420818
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Kunal Sablok <kunal.sablok@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Authored by Shuhei Matsumoto on 2018-07-31 09:14:18 +09:00, committed by Ben Walker
Parent: 6e320a7629
Commit: 056980c10c
4 changed files with 130 additions and 180 deletions
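
Before this change the module allocated a wrapper, struct raid_bdev_ctxt, which held the spdk_bdev and the raid_bdev side by side, and code hopped between the two with SPDK_CONTAINEROF. After it, the spdk_bdev is embedded as the first member of struct raid_bdev, so the bdev layer's context pointer and the module's own state are one allocation. A minimal sketch of the two layouts, with the types reduced to what this diff touches (the full definitions live in the SPDK headers):

#include <stdint.h>

/* Reduced stand-in for the SPDK type; see include/spdk/bdev.h. */
struct spdk_bdev {
	char *name;
	void *ctxt;	/* module-private pointer handed back to fn_table callbacks */
};

/* Old layout: a wrapper with two side-by-side members. */
struct raid_bdev_old { uint8_t num_base_bdevs; };
struct raid_bdev_ctxt {
	struct spdk_bdev bdev;		/* registered with the bdev layer */
	struct raid_bdev_old raid_bdev;	/* module state, used as the io device */
};

/* New layout: the spdk_bdev is embedded first; one object plays both roles. */
struct raid_bdev {
	struct spdk_bdev bdev;		/* offset 0, so the two pointers coincide */
	uint8_t num_base_bdevs;		/* module state follows directly */
};

/* fn_table callbacks can now cast ctxt with no pointer arithmetic. */
static int
raid_bdev_destruct_sketch(void *ctxt)
{
	struct raid_bdev *raid_bdev = ctxt;	/* was: &((struct raid_bdev_ctxt *)ctxt)->raid_bdev */

	return raid_bdev->num_base_bdevs == 0 ? 0 : -1;
}
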

File 1 of 4: raid bdev module implementation

@@ -90,19 +90,19 @@ raid_bdev_create_cb(void *io_device, void *ctx_buf)
assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
/*
-* Store raid_bdev_ctxt in each channel which is used to get the read only
+* Store raid_bdev in each channel which is used to get the read only
* raid bdev specific information during io split logic like base bdev
* descriptors, strip size etc
*/
-ch->raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
+ch->raid_bdev = raid_bdev;
-ch->base_bdevs_io_channel = calloc(ch->raid_bdev_ctxt->raid_bdev.num_base_bdevs,
+ch->base_bdevs_io_channel = calloc(raid_bdev->num_base_bdevs,
sizeof(struct spdk_io_channel *));
if (!ch->base_bdevs_io_channel) {
SPDK_ERRLOG("Unable to allocate base bdevs io channel\n");
return -1;
}
-for (uint32_t i = 0; i < ch->raid_bdev_ctxt->raid_bdev.num_base_bdevs; i++) {
+for (uint32_t i = 0; i < raid_bdev->num_base_bdevs; i++) {
/*
* Get the spdk_io_channel for all the base bdevs. This is used during
* split logic to send the respective child bdev ios to respective base
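
For context, the create callback in this hunk runs once per thread: the io_channel framework passes the raid_bdev as io_device and a zeroed raid_bdev_io_channel as ctx_buf, and the callback caches the raid_bdev pointer plus one spdk_io_channel per base bdev so the I/O splitting path never repeats these lookups. A condensed, self-contained sketch of the pattern, with the SPDK types reduced to stand-ins and error cleanup omitted:

#include <stdint.h>
#include <stdlib.h>

/* Reduced stand-ins; the real types come from the SPDK headers and the
 * raid bdev header shown later in this commit. */
struct spdk_io_channel;
struct spdk_bdev_desc;
struct spdk_io_channel *spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc);

struct raid_base_bdev_info_sketch { struct spdk_bdev_desc *base_bdev_desc; };
struct raid_bdev_sketch {
	uint8_t num_base_bdevs;
	struct raid_base_bdev_info_sketch *base_bdev_info;
};
struct raid_bdev_io_channel_sketch {
	struct spdk_io_channel **base_bdevs_io_channel;
	struct raid_bdev_sketch *raid_bdev;
};

static int
raid_ch_create_sketch(void *io_device, void *ctx_buf)
{
	struct raid_bdev_sketch *raid_bdev = io_device;
	struct raid_bdev_io_channel_sketch *ch = ctx_buf;

	ch->raid_bdev = raid_bdev;	/* read-only config used while splitting I/O */
	ch->base_bdevs_io_channel = calloc(raid_bdev->num_base_bdevs,
					   sizeof(struct spdk_io_channel *));
	if (ch->base_bdevs_io_channel == NULL) {
		return -1;
	}
	for (uint8_t i = 0; i < raid_bdev->num_base_bdevs; i++) {
		/* one channel per base bdev, reused for every child I/O */
		ch->base_bdevs_io_channel[i] =
			spdk_bdev_get_io_channel(raid_bdev->base_bdev_info[i].base_bdev_desc);
	}
	return 0;
}
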
@@ -150,7 +150,7 @@ raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
spdk_put_io_channel(ch->base_bdevs_io_channel[i]);
ch->base_bdevs_io_channel[i] = NULL;
}
-ch->raid_bdev_ctxt = NULL;
+ch->raid_bdev = NULL;
free(ch->base_bdevs_io_channel);
ch->base_bdevs_io_channel = NULL;
}
@@ -160,18 +160,16 @@ raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
* raid_bdev_cleanup is used to cleanup and free raid_bdev related data
* structures.
* params:
-* raid_bdev_ctxt - pointer to raid_bdev_ctxt
+* raid_bdev - pointer to raid_bdev
* returns:
* none
*/
static void
-raid_bdev_cleanup(struct raid_bdev_ctxt *raid_bdev_ctxt)
+raid_bdev_cleanup(struct raid_bdev *raid_bdev)
{
-struct raid_bdev *raid_bdev = &raid_bdev_ctxt->raid_bdev;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_cleanup, %p name %s, state %u, raid_bdev_config %p\n",
-raid_bdev_ctxt,
-raid_bdev_ctxt->bdev.name, raid_bdev->state, raid_bdev->raid_bdev_config);
+raid_bdev,
+raid_bdev->bdev.name, raid_bdev->state, raid_bdev->raid_bdev_config);
if (raid_bdev->state == RAID_BDEV_STATE_CONFIGURING) {
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
} else if (raid_bdev->state == RAID_BDEV_STATE_OFFLINE) {
@@ -180,23 +178,23 @@ raid_bdev_cleanup(struct raid_bdev_ctxt *raid_bdev_ctxt)
assert(0);
}
TAILQ_REMOVE(&g_spdk_raid_bdev_list, raid_bdev, link_global_list);
-assert(raid_bdev_ctxt->bdev.name);
-free(raid_bdev_ctxt->bdev.name);
-raid_bdev_ctxt->bdev.name = NULL;
+assert(raid_bdev->bdev.name);
+free(raid_bdev->bdev.name);
+raid_bdev->bdev.name = NULL;
assert(raid_bdev->base_bdev_info);
free(raid_bdev->base_bdev_info);
raid_bdev->base_bdev_info = NULL;
if (raid_bdev->raid_bdev_config) {
-raid_bdev->raid_bdev_config->raid_bdev_ctxt = NULL;
+raid_bdev->raid_bdev_config->raid_bdev = NULL;
}
-free(raid_bdev_ctxt);
+free(raid_bdev);
}
/*
* brief:
* raid_bdev_destruct is the destruct function table pointer for raid bdev
* params:
-* ctxt - pointer to raid_bdev_ctxt
+* ctxt - pointer to raid_bdev
* returns:
* 0 - success
* non zero - failure
@@ -204,8 +202,7 @@ raid_bdev_cleanup(struct raid_bdev_ctxt *raid_bdev_ctxt)
static int
raid_bdev_destruct(void *ctxt)
{
-struct raid_bdev_ctxt *raid_bdev_ctxt = ctxt;
-struct raid_bdev *raid_bdev = &raid_bdev_ctxt->raid_bdev;
+struct raid_bdev *raid_bdev = ctxt;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_destruct\n");
@@ -229,7 +226,7 @@ raid_bdev_destruct(void *ctxt)
if (raid_bdev->num_base_bdevs_discovered == 0) {
/* Free raid_bdev when there are no base bdevs left */
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev base bdevs is 0, going to free all in destruct\n");
-raid_bdev_cleanup(raid_bdev_ctxt);
+raid_bdev_cleanup(raid_bdev);
}
return 0;
@@ -301,7 +298,7 @@ raid_bdev_send_passthru(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io
int ret;
raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
-raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
+raid_bdev = raid_bdev_io_channel->raid_bdev;
raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
raid_bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
@@ -368,7 +365,7 @@ raid_bdev_submit_children(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_
{
struct raid_bdev_io_channel *raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
struct raid_bdev_io *raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
-struct raid_bdev *raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
+struct raid_bdev *raid_bdev = raid_bdev_io_channel->raid_bdev;
uint64_t pd_strip;
uint32_t offset_in_strip;
uint64_t pd_lba;
@@ -565,7 +562,7 @@ raid_bdev_waitq_io_process(void *ctx)
* crunch then break the loop and don't try to process other queued IOs.
*/
raid_bdev_io_channel = spdk_io_channel_get_ctx(raid_bdev_io->ch);
-raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
+raid_bdev = raid_bdev_io_channel->raid_bdev;
if (raid_bdev->num_base_bdevs > 1) {
start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
@@ -611,7 +608,7 @@ _raid_bdev_submit_rw_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bd
* IO parameters used during io split and io completion
*/
raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
-raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
+raid_bdev = raid_bdev_io_channel->raid_bdev;
raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
if (raid_bdev->num_base_bdevs > 1) {
start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
@@ -706,23 +703,23 @@ raid_bdev_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
* raid_bdev_get_io_channel is the get_io_channel function table pointer for
* raid bdev. This is used to return the io channel for this raid bdev
* params:
-* ctxt - pointer to raid_bdev_ctxt
+* ctxt - pointer to raid_bdev
* returns:
* pointer to io channel for raid bdev
*/
static struct spdk_io_channel *
raid_bdev_get_io_channel(void *ctxt)
{
-struct raid_bdev_ctxt *raid_bdev_ctxt = ctxt;
+struct raid_bdev *raid_bdev = ctxt;
-return spdk_get_io_channel(&raid_bdev_ctxt->raid_bdev);
+return spdk_get_io_channel(raid_bdev);
}
/*
* brief:
* raid_bdev_dump_info_json is the function table pointer for raid bdev
* params:
-* ctx - pointer to raid_bdev_ctxt
+* ctx - pointer to raid_bdev
* w - pointer to json context
* returns:
* 0 - success
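
A note on the get_io_channel change in the hunk above: spdk_get_io_channel() looks a channel up by the io_device pointer it was registered under, so the argument must be exactly the pointer that was passed to spdk_io_device_register() when the raid bdev went online. With the wrapper gone, that key is the raid_bdev itself rather than &raid_bdev_ctxt->raid_bdev. A schematic of the pairing; the prototypes are simplified stand-ins for spdk/thread.h (the real register call has since gained a name argument):

#include <stdint.h>

struct raid_bdev;
struct spdk_io_channel;
void spdk_io_device_register(void *io_device,
			     int (*create_cb)(void *io_device, void *ctx_buf),
			     void (*destroy_cb)(void *io_device, void *ctx_buf),
			     uint32_t ctx_size);
struct spdk_io_channel *spdk_get_io_channel(void *io_device);

/* Registration, once, when the raid bdev goes online: the raid_bdev pointer
 * becomes the lookup key, and each calling thread later gets a private
 * raid_bdev_io_channel built by raid_bdev_create_cb(). */
static void
raid_bdev_online_sketch(struct raid_bdev *raid_bdev,
			int (*create_cb)(void *, void *),
			void (*destroy_cb)(void *, void *),
			uint32_t ch_ctx_size)
{
	spdk_io_device_register(raid_bdev, create_cb, destroy_cb, ch_ctx_size);
}

/* Lookup, from the function table: ctxt is bdev->ctxt, which now points at
 * the raid_bdev itself, so it matches the registration key directly. */
static struct spdk_io_channel *
raid_bdev_get_io_channel_sketch(void *ctxt)
{
	return spdk_get_io_channel(ctxt);
}
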
@@ -731,12 +728,10 @@ raid_bdev_get_io_channel(void *ctxt)
static int
raid_bdev_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
-struct raid_bdev_ctxt *raid_bdev_ctxt = ctx;
-struct raid_bdev *raid_bdev;
+struct raid_bdev *raid_bdev = ctx;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_dump_config_json\n");
-assert(raid_bdev_ctxt != NULL);
-raid_bdev = &raid_bdev_ctxt->raid_bdev;
+assert(raid_bdev != NULL);
/* Dump the raid bdev configuration related information */
spdk_json_write_name(w, "raid");
@@ -1171,7 +1166,6 @@ raid_bdev_remove_base_bdev(void *ctx)
struct spdk_bdev *base_bdev = ctx;
struct raid_bdev *raid_bdev;
struct raid_bdev *next_raid_bdev;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
uint16_t i;
bool found = false;
@@ -1198,7 +1192,6 @@ raid_bdev_remove_base_bdev(void *ctx)
assert(raid_bdev != NULL);
assert(raid_bdev->base_bdev_info[i].base_bdev);
assert(raid_bdev->base_bdev_info[i].base_bdev_desc);
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
raid_bdev->base_bdev_info[i].base_bdev_remove_scheduled = true;
if (raid_bdev->destruct_called == true && raid_bdev->base_bdev_info[i].base_bdev != NULL) {
@@ -1211,7 +1204,7 @@ raid_bdev_remove_base_bdev(void *ctx)
raid_bdev->num_base_bdevs_discovered--;
if (raid_bdev->num_base_bdevs_discovered == 0) {
/* There are no base bdevs left for this raid, so free the raid device */
-raid_bdev_cleanup(raid_bdev_ctxt);
+raid_bdev_cleanup(raid_bdev);
return;
}
}
@@ -1228,8 +1221,8 @@ raid_bdev_remove_base_bdev(void *ctx)
assert(raid_bdev->num_base_bdevs_discovered);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev state chaning from online to offline\n");
spdk_io_device_unregister(&raid_bdev_ctxt->raid_bdev, NULL);
spdk_bdev_unregister(&raid_bdev_ctxt->bdev, NULL, NULL);
spdk_io_device_unregister(raid_bdev, NULL);
spdk_bdev_unregister(&raid_bdev->bdev, NULL, NULL);
}
}
@@ -1248,7 +1241,6 @@ int
raid_bdev_add_base_device(struct spdk_bdev *bdev)
{
struct raid_bdev_config *raid_bdev_config = NULL;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
struct raid_bdev *raid_bdev;
struct spdk_bdev_desc *desc;
struct spdk_bdev *raid_bdev_gen;
@@ -1278,36 +1270,35 @@ raid_bdev_add_base_device(struct spdk_bdev *bdev)
return -1;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "bdev %s is claimed\n", bdev->name);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_config->raid_bdev_ctxt %p\n",
raid_bdev_config->raid_bdev_ctxt);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_config->raid_bdev %p\n",
raid_bdev_config->raid_bdev);
if (!raid_bdev_config->raid_bdev_ctxt) {
if (!raid_bdev_config->raid_bdev) {
/* Allocate raid_bdev entity if it is not already allocated */
-raid_bdev_ctxt = calloc(1, sizeof(*raid_bdev_ctxt));
-if (!raid_bdev_ctxt) {
+raid_bdev = calloc(1, sizeof(*raid_bdev));
+if (!raid_bdev) {
SPDK_ERRLOG("Unable to allocate memory for raid bdev for bdev '%s'\n", bdev->name);
spdk_bdev_module_release_bdev(bdev);
spdk_bdev_close(desc);
return -1;
}
-raid_bdev = &raid_bdev_ctxt->raid_bdev;
raid_bdev->num_base_bdevs = raid_bdev_config->num_base_bdevs;
raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs, sizeof(struct raid_base_bdev_info));
if (!raid_bdev->base_bdev_info) {
SPDK_ERRLOG("Unable able to allocate base bdev info\n");
free(raid_bdev_ctxt);
free(raid_bdev);
spdk_bdev_module_release_bdev(bdev);
spdk_bdev_close(desc);
return -1;
}
-raid_bdev_config->raid_bdev_ctxt = raid_bdev_ctxt;
+raid_bdev_config->raid_bdev = raid_bdev;
raid_bdev->strip_size = raid_bdev_config->strip_size;
raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
raid_bdev->raid_bdev_config = raid_bdev_config;
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_list, raid_bdev, link_global_list);
} else {
-raid_bdev = &raid_bdev_config->raid_bdev_ctxt->raid_bdev;
+raid_bdev = raid_bdev_config->raid_bdev;
}
assert(raid_bdev->state != RAID_BDEV_STATE_ONLINE);
@@ -1345,8 +1336,7 @@ raid_bdev_add_base_device(struct spdk_bdev *bdev)
return -1;
}
}
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-raid_bdev_gen = &raid_bdev_ctxt->bdev;
+raid_bdev_gen = &raid_bdev->bdev;
raid_bdev_gen->name = strdup(raid_bdev_config->name);
if (!raid_bdev_gen->name) {
SPDK_ERRLOG("Unable to allocate name for raid\n");
@@ -1359,7 +1349,7 @@ raid_bdev_add_base_device(struct spdk_bdev *bdev)
raid_bdev_gen->write_cache = 0;
raid_bdev_gen->blocklen = blocklen;
raid_bdev_gen->optimal_io_boundary = 0;
-raid_bdev_gen->ctxt = raid_bdev_ctxt;
+raid_bdev_gen->ctxt = raid_bdev;
raid_bdev_gen->fn_table = &g_raid_bdev_fn_table;
raid_bdev_gen->module = &g_raid_if;
raid_bdev->strip_size = (raid_bdev->strip_size * 1024) / blocklen;

File 2 of 4: raid bdev module header

@@ -78,10 +78,14 @@ struct raid_base_bdev_info {
};
/*
-* raid_bdev contains the information related to any raid bdev either configured or
-* in configuring list
+* raid_bdev is the single entity structure which contains SPDK block device
+* and the information related to any raid bdev either configured or
+* in configuring list. io device is created on this.
*/
struct raid_bdev {
+/* raid bdev device, this will get registered in bdev layer */
+struct spdk_bdev bdev;
/* link of raid bdev to link it to configured, configuring or offline list */
TAILQ_ENTRY(raid_bdev) link_specific_list;
@@ -119,18 +123,6 @@
bool destruct_called;
};
-/*
-* raid_bdev_ctxt is the single entity structure for entire bdev which is
-* allocated for any raid bdev
-*/
-struct raid_bdev_ctxt {
-/* raid bdev device, this will get registered in bdev layer */
-struct spdk_bdev bdev;
-/* raid_bdev object, io device will be created on this */
-struct raid_bdev raid_bdev;
-};
/*
* raid_bdev_io is the context part of bdev_io. It contains the information
* related to bdev_io for a pooled bdev
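
The deleted wrapper above is what forced the SPDK_CONTAINEROF hops seen throughout this commit. The idiom and its replacement, as a self-contained sketch (CONTAINEROF is the usual offsetof-based definition; the structures are reduced to two members each):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CONTAINEROF(ptr, type, member) \
	((type *)((uintptr_t)(ptr) - offsetof(type, member)))

struct spdk_bdev_sketch { const char *name; };

struct raid_bdev_sketch {
	struct spdk_bdev_sketch bdev;	/* first member, so offset 0 */
	uint8_t num_base_bdevs;
};

int
main(void)
{
	struct raid_bdev_sketch raid = { { "raid1" }, 2 };

	/* Wrapper-era pattern: recover the outer object from an inner member. */
	struct raid_bdev_sketch *outer =
		CONTAINEROF(&raid.bdev, struct raid_bdev_sketch, bdev);
	assert(outer == &raid);

	/* New pattern: with bdev at offset 0, the bdev layer's pointer and the
	 * raid bdev are the same address; no arithmetic needed. */
	assert((void *)&raid.bdev == (void *)&raid);
	return 0;
}
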
@@ -173,7 +165,7 @@ struct raid_bdev_config {
struct raid_base_bdev_config *base_bdev;
/* Points to already created raid bdev */
-struct raid_bdev_ctxt *raid_bdev_ctxt;
+struct raid_bdev *raid_bdev;
char *name;
@@ -210,7 +202,7 @@ struct raid_bdev_io_channel {
struct spdk_io_channel **base_bdevs_io_channel;
/* raid bdev context pointer */
-struct raid_bdev_ctxt *raid_bdev_ctxt;
+struct raid_bdev *raid_bdev;
};
/* TAIL heads for various raid bdev lists */

File 3 of 4: raid bdev RPC implementation

@@ -53,19 +53,17 @@ SPDK_LOG_REGISTER_COMPONENT("raidrpc", SPDK_LOG_RAID_RPC)
* name - raid bdev name
* returns:
* NULL - raid bdev not present
-non NULL - raid bdev present, returns raid_bdev_ctxt
+non NULL - raid bdev present, returns raid_bdev
*/
-static struct raid_bdev_ctxt *
+static struct raid_bdev *
check_raid_bdev_present(char *raid_bdev_name)
{
struct raid_bdev *raid_bdev;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
TAILQ_FOREACH(raid_bdev, &g_spdk_raid_bdev_list, link_global_list) {
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(raid_bdev_ctxt->bdev.name, raid_bdev_name) == 0) {
+if (strcmp(raid_bdev->bdev.name, raid_bdev_name) == 0) {
/* raid bdev found */
-return raid_bdev_ctxt;
+return raid_bdev;
}
}
@@ -123,7 +121,6 @@ spdk_rpc_get_raid_bdevs(struct spdk_jsonrpc_request *request, const struct spdk_
struct rpc_get_raid_bdevs req = {};
struct spdk_json_write_ctx *w;
struct raid_bdev *raid_bdev;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
if (spdk_json_decode_object(params, rpc_get_raid_bdevs_decoders,
SPDK_COUNTOF(rpc_get_raid_bdevs_decoders),
@@ -153,23 +150,19 @@ spdk_rpc_get_raid_bdevs(struct spdk_jsonrpc_request *request, const struct spdk_
/* Get raid bdev list based on the category requested */
if (strcmp(req.category, "all") == 0) {
TAILQ_FOREACH(raid_bdev, &g_spdk_raid_bdev_list, link_global_list) {
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-spdk_json_write_string(w, raid_bdev_ctxt->bdev.name);
+spdk_json_write_string(w, raid_bdev->bdev.name);
}
} else if (strcmp(req.category, "online") == 0) {
TAILQ_FOREACH(raid_bdev, &g_spdk_raid_bdev_configured_list, link_specific_list) {
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-spdk_json_write_string(w, raid_bdev_ctxt->bdev.name);
+spdk_json_write_string(w, raid_bdev->bdev.name);
}
} else if (strcmp(req.category, "configuring") == 0) {
TAILQ_FOREACH(raid_bdev, &g_spdk_raid_bdev_configuring_list, link_specific_list) {
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-spdk_json_write_string(w, raid_bdev_ctxt->bdev.name);
+spdk_json_write_string(w, raid_bdev->bdev.name);
}
} else {
TAILQ_FOREACH(raid_bdev, &g_spdk_raid_bdev_offline_list, link_specific_list) {
-raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
-spdk_json_write_string(w, raid_bdev_ctxt->bdev.name);
+spdk_json_write_string(w, raid_bdev->bdev.name);
}
}
spdk_json_write_array_end(w);
@@ -253,17 +246,16 @@ static const struct spdk_json_object_decoder rpc_construct_raid_bdev_decoders[]
* raid_bdev_config - pointer to raid_bdev_config structure
* returns:
* NULL - raid not present
-non NULL - raid present, returns raid_bdev_ctxt
+non NULL - raid present, returns raid_bdev
*/
static void
check_and_remove_raid_bdev(struct raid_bdev_config *raid_bdev_config)
{
struct raid_bdev *raid_bdev;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
/* Get the raid structure allocated, if it exists */
-raid_bdev_ctxt = raid_bdev_config->raid_bdev_ctxt;
-if (raid_bdev_ctxt == NULL) {
+raid_bdev = raid_bdev_config->raid_bdev;
+if (raid_bdev == NULL) {
return;
}
@@ -271,8 +263,7 @@ check_and_remove_raid_bdev(struct raid_bdev_config *raid_bdev_config)
* raid should be in configuring state as this function is used to cleanup
* the raid during unsuccessful construction of raid
*/
-assert(raid_bdev_ctxt->raid_bdev.state == RAID_BDEV_STATE_CONFIGURING);
-raid_bdev = &raid_bdev_ctxt->raid_bdev;
+assert(raid_bdev->state == RAID_BDEV_STATE_CONFIGURING);
for (uint32_t i = 0; i < raid_bdev->num_base_bdevs; i++) {
assert(raid_bdev->base_bdev_info != NULL);
if (raid_bdev->base_bdev_info[i].base_bdev) {
@@ -290,8 +281,8 @@ check_and_remove_raid_bdev(struct raid_bdev_config *raid_bdev_config)
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_REMOVE(&g_spdk_raid_bdev_list, raid_bdev, link_global_list);
free(raid_bdev->base_bdev_info);
-free(raid_bdev_ctxt);
-raid_bdev_config->raid_bdev_ctxt = NULL;
+free(raid_bdev);
+raid_bdev_config->raid_bdev = NULL;
}
/*
@@ -310,8 +301,8 @@ spdk_rpc_construct_raid_bdev(struct spdk_jsonrpc_request *request,
{
struct rpc_construct_raid_bdev req = {};
struct spdk_json_write_ctx *w;
-struct raid_bdev_ctxt *raid_bdev_ctxt;
struct raid_bdev_config *raid_bdev_config;
+struct raid_bdev *raid_bdev;
struct spdk_bdev *base_bdev;
int rc;
@@ -324,8 +315,8 @@ spdk_rpc_construct_raid_bdev(struct spdk_jsonrpc_request *request,
}
/* Fail the command if raid bdev is already present */
-raid_bdev_ctxt = check_raid_bdev_present(req.name);
-if (raid_bdev_ctxt != NULL) {
+raid_bdev = check_raid_bdev_present(req.name);
+if (raid_bdev != NULL) {
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
"raid bdev already present");
free_rpc_construct_raid_bdev(&req);
@@ -446,7 +437,7 @@ raid_bdev_config_destroy_check_raid_bdev_exists(void *arg)
struct raid_bdev_config *raid_cfg = arg;
assert(raid_cfg != NULL);
-if (raid_cfg->raid_bdev_ctxt != NULL) {
+if (raid_cfg->raid_bdev != NULL) {
/* If raid bdev still exists, schedule event and come back later */
spdk_thread_send_msg(spdk_get_thread(), raid_bdev_config_destroy_check_raid_bdev_exists, raid_cfg);
return;
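
The destroy-check function above is a small self-rescheduling poll: if the raid bdev still exists it re-queues itself on the current SPDK thread and returns, so the reactor is never blocked while unregistration completes. A minimal sketch of the idiom; the two thread calls are the ones used in the hunk, declared here as simplified stand-ins, and the final cleanup step is elided:

/* Simplified stand-ins for spdk/thread.h. */
struct spdk_thread;
struct spdk_thread *spdk_get_thread(void);
void spdk_thread_send_msg(struct spdk_thread *thread, void (*fn)(void *ctx), void *ctx);

struct raid_cfg_sketch {
	void *raid_bdev;	/* cleared once the raid bdev is fully destroyed */
};

static void
destroy_when_gone(void *arg)
{
	struct raid_cfg_sketch *cfg = arg;

	if (cfg->raid_bdev != NULL) {
		/* Still alive: check again on a later pass of this thread. */
		spdk_thread_send_msg(spdk_get_thread(), destroy_when_gone, cfg);
		return;
	}
	/* ... safe to free the configuration here ... */
}
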
@@ -468,7 +459,7 @@ static void
raid_bdev_config_destroy(struct raid_bdev_config *raid_cfg)
{
assert(raid_cfg != NULL);
-if (raid_cfg->raid_bdev_ctxt != NULL) {
+if (raid_cfg->raid_bdev != NULL) {
/*
* If raid bdev exists for this config, wait for raid bdev to get
* destroyed and come back later

File 4 of 4: raid bdev unit tests

@@ -761,14 +761,12 @@ verify_raid_config_present(const char *name, bool presence)
static void
verify_raid_bdev_present(const char *name, bool presence)
{
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct raid_bdev *pbdev;
bool pbdev_found;
pbdev_found = false;
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, name) == 0) {
+if (strcmp(pbdev->bdev.name, name) == 0) {
pbdev_found = true;
break;
}
@@ -791,7 +789,7 @@ verify_raid_config(struct rpc_construct_raid_bdev *r, bool presence)
if (presence == false) {
break;
}
-CU_ASSERT(raid_cfg->raid_bdev_ctxt != NULL);
+CU_ASSERT(raid_cfg->raid_bdev != NULL);
CU_ASSERT(raid_cfg->strip_size == r->strip_size);
CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs);
CU_ASSERT(raid_cfg->raid_level == r->raid_level);
@@ -815,7 +813,6 @@ verify_raid_config(struct rpc_construct_raid_bdev *r, bool presence)
static void
verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid_state)
{
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct raid_bdev *pbdev;
uint32_t i;
struct spdk_bdev *bdev = NULL;
@@ -824,13 +821,12 @@ verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid
pbdev_found = false;
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, r->name) == 0) {
+if (strcmp(pbdev->bdev.name, r->name) == 0) {
pbdev_found = true;
if (presence == false) {
break;
}
-CU_ASSERT(pbdev->raid_bdev_config->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(pbdev->raid_bdev_config->raid_bdev == pbdev);
CU_ASSERT(pbdev->base_bdev_info != NULL);
CU_ASSERT(pbdev->strip_size == ((r->strip_size * 1024) / g_block_len));
CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size * 1024) / g_block_len)));
@@ -854,14 +850,14 @@ verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid
}
}
CU_ASSERT((((min_blockcnt / (r->strip_size * 1024 / g_block_len)) * (r->strip_size * 1024 /
-g_block_len)) * r->base_bdevs.num_base_bdevs) == pbdev_ctxt->bdev.blockcnt);
-CU_ASSERT(strcmp(pbdev_ctxt->bdev.product_name, "Pooled Device") == 0);
-CU_ASSERT(pbdev_ctxt->bdev.write_cache == 0);
-CU_ASSERT(pbdev_ctxt->bdev.blocklen == g_block_len);
-CU_ASSERT(pbdev_ctxt->bdev.optimal_io_boundary == 0);
-CU_ASSERT(pbdev_ctxt->bdev.ctxt == pbdev_ctxt);
-CU_ASSERT(pbdev_ctxt->bdev.fn_table == &g_raid_bdev_fn_table);
-CU_ASSERT(pbdev_ctxt->bdev.module == &g_raid_if);
+g_block_len)) * r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
+CU_ASSERT(strcmp(pbdev->bdev.product_name, "Pooled Device") == 0);
+CU_ASSERT(pbdev->bdev.write_cache == 0);
+CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
+CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
+CU_ASSERT(pbdev->bdev.ctxt == pbdev);
+CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
+CU_ASSERT(pbdev->bdev.module == &g_raid_if);
break;
}
}
@@ -873,24 +869,21 @@ verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid
pbdev_found = false;
if (raid_state == RAID_BDEV_STATE_ONLINE) {
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configured_list, link_specific_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, r->name) == 0) {
+if (strcmp(pbdev->bdev.name, r->name) == 0) {
pbdev_found = true;
break;
}
}
} else if (raid_state == RAID_BDEV_STATE_CONFIGURING) {
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configuring_list, link_specific_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, r->name) == 0) {
+if (strcmp(pbdev->bdev.name, r->name) == 0) {
pbdev_found = true;
break;
}
}
} else if (raid_state == RAID_BDEV_STATE_OFFLINE) {
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_offline_list, link_specific_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, r->name) == 0) {
+if (strcmp(pbdev->bdev.name, r->name) == 0) {
pbdev_found = true;
break;
}
@@ -1275,7 +1268,6 @@ test_io_channel(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
@@ -1295,22 +1287,21 @@
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
}
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
free_test_req(&req);
@@ -1336,7 +1327,6 @@ test_write_io(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
@@ -1359,19 +1349,18 @@
verify_raid_config(&req, true);
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch != NULL);
ch_ctx = spdk_io_channel_get_ctx(ch);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
}
@@ -1386,15 +1375,15 @@
memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
g_io_output_index = 0;
raid_bdev_submit_request(ch, bdev_io);
-verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, &pbdev_ctxt->raid_bdev,
+verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
g_child_io_status_flag);
bdev_io_cleanup(bdev_io);
free(bdev_io);
}
free_test_req(&req);
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
free(ch);
destroy_req.name = strdup("raid1");
@@ -1418,7 +1407,6 @@ test_read_io(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
@@ -1441,19 +1429,18 @@
verify_raid_config(&req, true);
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch != NULL);
ch_ctx = spdk_io_channel_get_ctx(ch);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
}
@@ -1469,14 +1456,14 @@
memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
g_io_output_index = 0;
raid_bdev_submit_request(ch, bdev_io);
-verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, &pbdev_ctxt->raid_bdev,
+verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
g_child_io_status_flag);
bdev_io_cleanup(bdev_io);
free(bdev_io);
}
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
free(ch);
destroy_req.name = strdup("raid1");
@@ -1501,7 +1488,6 @@ test_io_failure(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
@@ -1524,19 +1510,18 @@
verify_raid_config(&req, true);
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch != NULL);
ch_ctx = spdk_io_channel_get_ctx(ch);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel && ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
}
@@ -1552,7 +1537,7 @@
memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
g_io_output_index = 0;
raid_bdev_submit_request(ch, bdev_io);
-verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, &pbdev_ctxt->raid_bdev,
+verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
INVALID_IO_SUBMIT);
bdev_io_cleanup(bdev_io);
free(bdev_io);
@@ -1570,14 +1555,14 @@
memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
g_io_output_index = 0;
raid_bdev_submit_request(ch, bdev_io);
-verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, &pbdev_ctxt->raid_bdev,
+verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
g_child_io_status_flag);
bdev_io_cleanup(bdev_io);
free(bdev_io);
}
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
free(ch);
destroy_req.name = strdup("raid1");
@@ -1602,7 +1587,6 @@ test_io_waitq(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
uint32_t i;
@@ -1627,19 +1611,18 @@
verify_raid_config(&req, true);
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-SPDK_CU_ASSERT_FATAL(pbdev_ctxt != NULL);
+SPDK_CU_ASSERT_FATAL(pbdev != NULL);
ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
SPDK_CU_ASSERT_FATAL(ch != NULL);
ch_ctx = spdk_io_channel_get_ctx(ch);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
SPDK_CU_ASSERT_FATAL(ch_ctx->base_bdevs_io_channel != NULL);
for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel[i] == (void *)0x1);
@@ -1672,8 +1655,8 @@
free(bdev_io);
}
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
g_ignore_io_output = 0;
free(ch);
@@ -1837,7 +1820,6 @@ test_multi_raid_with_io(void)
uint32_t count;
uint32_t bbdev_idx = 0;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
struct spdk_io_channel *ch;
struct raid_bdev_io_channel *ch_ctx;
struct spdk_bdev_io *bdev_io;
@@ -1870,16 +1852,15 @@
verify_raid_config(&construct_req[i], true);
verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, construct_req[i].name) == 0) {
+if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-CU_ASSERT(raid_bdev_create_cb(&pbdev_ctxt->raid_bdev, ch_ctx) == 0);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == pbdev_ctxt);
+CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+CU_ASSERT(ch_ctx->raid_bdev == pbdev);
CU_ASSERT(ch_ctx->base_bdevs_io_channel != NULL);
for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
CU_ASSERT(ch_ctx->base_bdevs_io_channel[j] == (void *)0x1);
@@ -1900,14 +1881,13 @@
ch_random = &ch[raid_random];
ch_ctx_random = spdk_io_channel_get_ctx(ch_random);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, construct_req[raid_random].name) == 0) {
+if (strcmp(pbdev->bdev.name, construct_req[raid_random].name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
raid_bdev_submit_request(ch_random, bdev_io);
-verify_io(bdev_io, g_max_base_drives, ch_ctx_random, &pbdev_ctxt->raid_bdev,
+verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
g_child_io_status_flag);
bdev_io_cleanup(bdev_io);
free(bdev_io);
@@ -1915,16 +1895,15 @@
for (i = 0; i < g_max_raids; i++) {
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, construct_req[i].name) == 0) {
+if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
-raid_bdev_destroy_cb(&pbdev_ctxt->raid_bdev, ch_ctx);
-CU_ASSERT(ch_ctx->raid_bdev_ctxt == NULL);
+raid_bdev_destroy_cb(pbdev, ch_ctx);
+CU_ASSERT(ch_ctx->raid_bdev == NULL);
CU_ASSERT(ch_ctx->base_bdevs_io_channel == NULL);
destroy_req.name = strdup(construct_req[i].name);
count = snprintf(name, 16, "%s", destroy_req.name);
@@ -2083,7 +2062,6 @@ test_raid_json_dump_info(void)
struct rpc_construct_raid_bdev req;
struct rpc_destroy_raid_bdev destroy_req;
struct raid_bdev *pbdev;
-struct raid_bdev_ctxt *pbdev_ctxt = NULL;
set_globals();
create_test_req(&req, "raid1", 0, true);
@@ -2100,14 +2078,13 @@
verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, link_global_list) {
-pbdev_ctxt = SPDK_CONTAINEROF(pbdev, struct raid_bdev_ctxt, raid_bdev);
-if (strcmp(pbdev_ctxt->bdev.name, req.name) == 0) {
+if (strcmp(pbdev->bdev.name, req.name) == 0) {
break;
}
}
-CU_ASSERT(pbdev_ctxt != NULL);
+CU_ASSERT(pbdev != NULL);
-CU_ASSERT(raid_bdev_dump_info_json(pbdev_ctxt, NULL) == 0);
+CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
free_test_req(&req);