2022-06-03 19:15:11 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
2022-11-01 20:26:26 +00:00
|
|
|
* Copyright (C) 2018 Intel Corporation.
|
2018-05-08 11:30:29 +00:00
|
|
|
* All rights reserved.
|
2021-10-21 07:28:48 +00:00
|
|
|
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2018-05-08 11:30:29 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "bdev_raid.h"
|
|
|
|
#include "spdk/env.h"
|
2020-03-03 09:20:43 +00:00
|
|
|
#include "spdk/thread.h"
|
2020-10-06 16:16:26 +00:00
|
|
|
#include "spdk/log.h"
|
2018-05-08 11:30:29 +00:00
|
|
|
#include "spdk/string.h"
|
|
|
|
#include "spdk/util.h"
|
|
|
|
#include "spdk/json.h"
|
|
|
|
|
2018-07-31 20:09:19 +00:00
|
|
|
static bool g_shutdown_started = false;
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/* List of all raid bdevs */
|
2022-08-23 10:52:55 +00:00
|
|
|
struct raid_all_tailq g_raid_bdev_list = TAILQ_HEAD_INITIALIZER(g_raid_bdev_list);
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2019-10-09 09:22:41 +00:00
|
|
|
static TAILQ_HEAD(, raid_bdev_module) g_raid_modules = TAILQ_HEAD_INITIALIZER(g_raid_modules);
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
static struct raid_bdev_module *
|
|
|
|
raid_bdev_module_find(enum raid_level level)
|
2019-10-09 09:22:41 +00:00
|
|
|
{
|
|
|
|
struct raid_bdev_module *raid_module;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(raid_module, &g_raid_modules, link) {
|
|
|
|
if (raid_module->level == level) {
|
|
|
|
return raid_module;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
void
|
|
|
|
raid_bdev_module_list_add(struct raid_bdev_module *raid_module)
|
2019-10-09 09:22:41 +00:00
|
|
|
{
|
|
|
|
if (raid_bdev_module_find(raid_module->level) != NULL) {
|
|
|
|
SPDK_ERRLOG("module for raid level '%s' already registered.\n",
|
|
|
|
raid_bdev_level_to_str(raid_module->level));
|
|
|
|
assert(false);
|
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_TAIL(&g_raid_modules, raid_module, link);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/* Function declarations */
|
2019-04-08 23:19:41 +00:00
|
|
|
static void raid_bdev_examine(struct spdk_bdev *bdev);
|
|
|
|
static int raid_bdev_init(void);
|
2019-04-09 00:40:54 +00:00
|
|
|
static void raid_bdev_deconfigure(struct raid_bdev *raid_bdev,
|
|
|
|
raid_bdev_destruct_cb cb_fn, void *cb_arg);
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2022-09-20 12:16:16 +00:00
|
|
|
static void raid_bdev_channel_on_suspended(struct raid_bdev_io_channel *raid_ch);
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_create_cb function is a cb function for raid bdev which creates the
 * hierarchy from raid bdev to base bdev io channels. It will be called per core
 * params:
 * io_device - pointer to raid bdev io device represented by raid_bdev
 * ctx_buf - pointer to context buffer for raid bdev io channel
 * returns:
 * 0 - success
 * non zero - failure
 */
static int
raid_bdev_create_cb(void *io_device, void *ctx_buf)
{
	struct raid_bdev *raid_bdev = io_device;
	struct raid_bdev_io_channel *raid_ch = ctx_buf;
	uint8_t i;
	int ret = 0;

	SPDK_DEBUGLOG(bdev_raid, "raid_bdev_create_cb, %p\n", raid_ch);

	assert(raid_bdev != NULL);
	assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);

	raid_ch->num_channels = raid_bdev->num_base_bdevs;
	TAILQ_INIT(&raid_ch->suspended_ios);

	raid_ch->base_channel = calloc(raid_ch->num_channels,
				       sizeof(struct spdk_io_channel *));
	if (!raid_ch->base_channel) {
		SPDK_ERRLOG("Unable to allocate base bdevs io channel\n");
		return -ENOMEM;
	}

	/* Hold the raid_bdev mutex so the suspended snapshot and the base
	 * descriptors stay consistent while the channels are acquired. */
	pthread_mutex_lock(&raid_bdev->mutex);
	raid_ch->is_suspended = (raid_bdev->suspend_cnt > 0);

	for (i = 0; i < raid_ch->num_channels; i++) {
		/*
		 * Get the spdk_io_channel for all the base bdevs. This is used during
		 * split logic to send the respective child bdev ios to respective base
		 * bdev io channel.
		 */
		/* A NULL desc means this base bdev slot is absent (e.g. removed);
		 * leave its channel slot NULL and continue. */
		if (raid_bdev->base_bdev_info[i].desc == NULL) {
			continue;
		}
		raid_ch->base_channel[i] = spdk_bdev_get_io_channel(
						   raid_bdev->base_bdev_info[i].desc);
		if (!raid_ch->base_channel[i]) {
			SPDK_ERRLOG("Unable to create io channel for base bdev\n");
			ret = -ENOMEM;
			break;
		}
	}
	pthread_mutex_unlock(&raid_bdev->mutex);

	/* Optionally let the raid level module set up its own per-channel state. */
	if (!ret && raid_bdev->module->get_io_channel) {
		raid_ch->module_channel = raid_bdev->module->get_io_channel(raid_bdev);
		if (!raid_ch->module_channel) {
			SPDK_ERRLOG("Unable to create io channel for raid module\n");
			ret = -ENOMEM;
		}
	}

	if (ret) {
		/* Roll back: release any base channels acquired before the failure. */
		for (i = 0; i < raid_ch->num_channels; i++) {
			if (raid_ch->base_channel[i] != NULL) {
				spdk_put_io_channel(raid_ch->base_channel[i]);
			}
		}
		free(raid_ch->base_channel);
		raid_ch->base_channel = NULL;
	}

	return ret;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_destroy_cb function is a cb function for raid bdev which deletes the
|
|
|
|
* hierarchy from raid bdev to base bdev io channels. It will be called per core
|
|
|
|
* params:
|
|
|
|
* io_device - pointer to raid bdev io device represented by raid_bdev
|
|
|
|
* ctx_buf - pointer to context buffer for raid bdev io channel
|
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
|
|
|
|
{
|
2018-08-20 23:25:27 +00:00
|
|
|
struct raid_bdev_io_channel *raid_ch = ctx_buf;
|
2019-11-13 09:50:00 +00:00
|
|
|
uint8_t i;
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_destroy_cb\n");
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2018-08-20 23:25:27 +00:00
|
|
|
assert(raid_ch != NULL);
|
2018-08-21 03:57:26 +00:00
|
|
|
assert(raid_ch->base_channel);
|
2022-09-20 12:16:16 +00:00
|
|
|
assert(TAILQ_EMPTY(&raid_ch->suspended_ios));
|
2019-12-09 14:44:08 +00:00
|
|
|
|
|
|
|
if (raid_ch->module_channel) {
|
|
|
|
spdk_put_io_channel(raid_ch->module_channel);
|
|
|
|
}
|
|
|
|
|
2019-11-13 09:50:00 +00:00
|
|
|
for (i = 0; i < raid_ch->num_channels; i++) {
|
2018-05-08 11:30:29 +00:00
|
|
|
/* Free base bdev channels */
|
2022-09-28 10:06:31 +00:00
|
|
|
if (raid_ch->base_channel[i] != NULL) {
|
|
|
|
spdk_put_io_channel(raid_ch->base_channel[i]);
|
|
|
|
}
|
2018-05-08 11:30:29 +00:00
|
|
|
}
|
2018-08-21 03:57:26 +00:00
|
|
|
free(raid_ch->base_channel);
|
|
|
|
raid_ch->base_channel = NULL;
|
2018-05-08 11:30:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* brief:
|
2022-02-03 15:43:25 +00:00
|
|
|
* raid_bdev_cleanup is used to cleanup raid_bdev related data
|
2018-05-08 11:30:29 +00:00
|
|
|
* structures.
|
|
|
|
* params:
|
2018-07-31 00:14:18 +00:00
|
|
|
* raid_bdev - pointer to raid_bdev
|
2018-05-08 11:30:29 +00:00
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
2019-04-08 05:54:22 +00:00
|
|
|
static void
|
2018-07-31 00:14:18 +00:00
|
|
|
raid_bdev_cleanup(struct raid_bdev *raid_bdev)
|
2018-05-08 11:30:29 +00:00
|
|
|
{
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_base_bdev_info *base_info;
|
|
|
|
|
2022-11-03 13:25:30 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_cleanup, %p name %s, state %s\n",
|
|
|
|
raid_bdev, raid_bdev->bdev.name, raid_bdev_state_to_str(raid_bdev->state));
|
2022-08-23 10:52:55 +00:00
|
|
|
assert(raid_bdev->state != RAID_BDEV_STATE_ONLINE);
|
2022-09-13 10:34:53 +00:00
|
|
|
assert(spdk_get_thread() == spdk_thread_get_app_thread());
|
2022-08-26 11:02:27 +00:00
|
|
|
|
|
|
|
RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
|
|
|
|
assert(base_info->bdev == NULL);
|
|
|
|
assert(base_info->desc == NULL);
|
|
|
|
free(base_info->name);
|
|
|
|
}
|
|
|
|
|
2019-04-08 06:57:04 +00:00
|
|
|
TAILQ_REMOVE(&g_raid_bdev_list, raid_bdev, global_link);
|
2018-05-08 11:30:29 +00:00
|
|
|
free(raid_bdev->base_bdev_info);
|
2022-02-03 15:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Free the raid_bdev structure and the resources it owns directly. */
static void
raid_bdev_free(struct raid_bdev *raid_bdev)
{
	pthread_mutex_destroy(&raid_bdev->mutex);
	free(raid_bdev->bdev.name);
	free(raid_bdev);
}
|
|
|
|
|
2022-02-03 15:43:25 +00:00
|
|
|
/* Convenience helper: tear down raid_bdev data structures, then free it. */
static void
raid_bdev_cleanup_and_free(struct raid_bdev *raid_bdev)
{
	raid_bdev_cleanup(raid_bdev);
	raid_bdev_free(raid_bdev);
}
|
|
|
|
|
2018-08-03 06:34:10 +00:00
|
|
|
/*
 * brief:
 * free resource of base bdev for raid bdev
 * params:
 * base_info - raid base bdev info
 * returns:
 * none
 */
static void
raid_bdev_free_base_bdev_resource(struct raid_base_bdev_info *base_info)
{
	struct raid_bdev *raid_bdev = base_info->raid_bdev;

	/* Descriptor close and discovery accounting happen on the app thread only. */
	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	free(base_info->name);
	base_info->name = NULL;

	/* Nothing more to release if the base bdev was never claimed/opened. */
	if (base_info->bdev == NULL) {
		return;
	}

	assert(base_info->desc);
	/* Release the module's claim before closing the descriptor. */
	spdk_bdev_module_release_bdev(base_info->bdev);
	spdk_bdev_close(base_info->desc);
	base_info->desc = NULL;
	base_info->bdev = NULL;

	assert(raid_bdev->num_base_bdevs_discovered);
	raid_bdev->num_base_bdevs_discovered--;
}
|
|
|
|
|
2022-02-03 15:43:25 +00:00
|
|
|
static void
|
|
|
|
raid_bdev_io_device_unregister_cb(void *io_device)
|
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev = io_device;
|
|
|
|
|
|
|
|
if (raid_bdev->num_base_bdevs_discovered == 0) {
|
|
|
|
/* Free raid_bdev when there are no base bdevs left */
|
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid bdev base bdevs is 0, going to free all in destruct\n");
|
|
|
|
raid_bdev_cleanup(raid_bdev);
|
|
|
|
spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
|
|
|
|
raid_bdev_free(raid_bdev);
|
|
|
|
} else {
|
|
|
|
spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-27 14:08:08 +00:00
|
|
|
void
|
|
|
|
raid_bdev_module_stop_done(struct raid_bdev *raid_bdev)
|
|
|
|
{
|
|
|
|
if (raid_bdev->state != RAID_BDEV_STATE_CONFIGURING) {
|
|
|
|
spdk_io_device_unregister(raid_bdev, raid_bdev_io_device_unregister_cb);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-13 10:34:53 +00:00
|
|
|
/* Destruct body, executed on the app thread (see raid_bdev_destruct). */
static void
_raid_bdev_destruct(void *ctxt)
{
	struct raid_bdev *raid_bdev = ctxt;
	struct raid_base_bdev_info *base_info;

	SPDK_DEBUGLOG(bdev_raid, "raid_bdev_destruct\n");

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		/*
		 * Close all base bdev descriptors for which call has come from below
		 * layers. Also close the descriptors if we have started shutdown.
		 */
		if (g_shutdown_started || base_info->remove_scheduled == true) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}

	if (g_shutdown_started) {
		raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
	}

	if (raid_bdev->module->stop != NULL) {
		if (raid_bdev->module->stop(raid_bdev) == false) {
			/* Module stop is asynchronous; the module will call
			 * raid_bdev_module_stop_done() when it finishes. */
			return;
		}
	}

	raid_bdev_module_stop_done(raid_bdev);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
raid_bdev_destruct(void *ctx)
|
|
|
|
{
|
|
|
|
spdk_thread_exec_msg(spdk_thread_get_app_thread(), _raid_bdev_destruct, ctx);
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2022-02-03 15:43:25 +00:00
|
|
|
return 1;
|
2018-05-08 11:30:29 +00:00
|
|
|
}
|
|
|
|
|
2019-10-10 10:48:13 +00:00
|
|
|
void
|
|
|
|
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
|
|
|
|
{
|
|
|
|
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
|
2022-09-20 12:16:16 +00:00
|
|
|
struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
|
2019-10-10 10:48:13 +00:00
|
|
|
|
|
|
|
spdk_bdev_io_complete(bdev_io, status);
|
2022-09-20 12:16:16 +00:00
|
|
|
|
|
|
|
raid_ch->num_ios--;
|
|
|
|
if (raid_ch->is_suspended && raid_ch->num_ios == 0) {
|
|
|
|
raid_bdev_channel_on_suspended(raid_ch);
|
|
|
|
}
|
2019-10-10 10:48:13 +00:00
|
|
|
}
|
|
|
|
|
2019-09-27 14:44:56 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
2019-11-15 14:07:32 +00:00
|
|
|
* raid_bdev_io_complete_part - signal the completion of a part of the expected
|
|
|
|
* base bdev IOs and complete the raid_io if this is the final expected IO.
|
|
|
|
* The caller should first set raid_io->base_bdev_io_remaining. This function
|
|
|
|
* will decrement this counter by the value of the 'completed' parameter and
|
|
|
|
* complete the raid_io if the counter reaches 0. The caller is free to
|
|
|
|
* interpret the 'base_bdev_io_remaining' and 'completed' values as needed,
|
|
|
|
* it can represent e.g. blocks or IOs.
|
2019-09-27 14:44:56 +00:00
|
|
|
* params:
|
2019-11-15 14:07:32 +00:00
|
|
|
* raid_io - pointer to raid_bdev_io
|
|
|
|
* completed - the part of the raid_io that has been completed
|
|
|
|
* status - status of the base IO
|
2019-09-27 14:44:56 +00:00
|
|
|
* returns:
|
2019-11-15 14:07:32 +00:00
|
|
|
* true - if the raid_io is completed
|
|
|
|
* false - otherwise
|
2019-09-27 14:44:56 +00:00
|
|
|
*/
|
2019-11-15 14:07:32 +00:00
|
|
|
bool
|
|
|
|
raid_bdev_io_complete_part(struct raid_bdev_io *raid_io, uint64_t completed,
|
|
|
|
enum spdk_bdev_io_status status)
|
2019-09-27 14:44:56 +00:00
|
|
|
{
|
2019-11-15 14:07:32 +00:00
|
|
|
assert(raid_io->base_bdev_io_remaining >= completed);
|
|
|
|
raid_io->base_bdev_io_remaining -= completed;
|
2019-09-27 14:44:56 +00:00
|
|
|
|
2019-11-15 14:07:32 +00:00
|
|
|
if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
|
|
|
|
raid_io->base_bdev_io_status = status;
|
2019-09-27 14:44:56 +00:00
|
|
|
}
|
|
|
|
|
2019-11-15 14:07:32 +00:00
|
|
|
if (raid_io->base_bdev_io_remaining == 0) {
|
2019-10-10 10:48:13 +00:00
|
|
|
raid_bdev_io_complete(raid_io, raid_io->base_bdev_io_status);
|
2019-11-15 14:07:32 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
2019-09-27 14:44:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-24 16:24:00 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
2019-10-08 10:39:36 +00:00
|
|
|
* raid_bdev_queue_io_wait function processes the IO which failed to submit.
|
|
|
|
* It will try to queue the IOs after storing the context to bdev wait queue logic.
|
2019-02-24 16:24:00 +00:00
|
|
|
* params:
|
2019-10-10 10:39:08 +00:00
|
|
|
* raid_io - pointer to raid_bdev_io
|
|
|
|
* bdev - the block device that the IO is submitted to
|
|
|
|
* ch - io channel
|
|
|
|
* cb_fn - callback when the spdk_bdev_io for bdev becomes available
|
2019-02-24 16:24:00 +00:00
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
2019-10-08 10:33:15 +00:00
|
|
|
void
|
2019-10-10 10:39:08 +00:00
|
|
|
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
|
|
|
|
struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
|
2019-02-24 16:24:00 +00:00
|
|
|
{
|
2019-10-10 10:39:08 +00:00
|
|
|
raid_io->waitq_entry.bdev = bdev;
|
2019-11-06 11:46:55 +00:00
|
|
|
raid_io->waitq_entry.cb_fn = cb_fn;
|
2019-10-10 10:39:08 +00:00
|
|
|
raid_io->waitq_entry.cb_arg = raid_io;
|
|
|
|
spdk_bdev_queue_io_wait(bdev, ch, &raid_io->waitq_entry);
|
2019-02-24 16:24:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-15 14:07:32 +00:00
|
|
|
static void
|
|
|
|
raid_base_bdev_reset_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct raid_bdev_io *raid_io = cb_arg;
|
|
|
|
|
|
|
|
spdk_bdev_free_io(bdev_io);
|
|
|
|
|
|
|
|
raid_bdev_io_complete_part(raid_io, 1, success ?
|
|
|
|
SPDK_BDEV_IO_STATUS_SUCCESS :
|
|
|
|
SPDK_BDEV_IO_STATUS_FAILED);
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
static void raid_bdev_submit_reset_request(struct raid_bdev_io *raid_io);

/* Retry trampoline: resubmit a reset that previously failed with -ENOMEM. */
static void
_raid_bdev_submit_reset_request(void *_raid_io)
{
	raid_bdev_submit_reset_request((struct raid_bdev_io *)_raid_io);
}
|
|
|
|
|
2018-08-24 16:40:22 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_submit_reset_request function submits reset requests
 * to member disks; it will submit as many as possible unless a reset fails with -ENOMEM, in
 * which case it will queue it for later submission
 * params:
 * raid_io
 * returns:
 * none
 */
static void
raid_bdev_submit_reset_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev;
	int ret;
	uint8_t i;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	raid_bdev = raid_io->raid_bdev;

	/* First invocation (not an -ENOMEM retry): expect one completion per
	 * base bdev. On retries the remaining count is preserved. */
	if (raid_io->base_bdev_io_remaining == 0) {
		raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
	}

	/* Resume from where a previous attempt left off. */
	for (i = raid_io->base_bdev_io_submitted; i < raid_bdev->num_base_bdevs; i++) {
		base_info = &raid_bdev->base_bdev_info[i];
		base_ch = raid_io->raid_ch->base_channel[i];
		/* Absent base bdev: count its part as trivially successful. */
		if (base_ch == NULL) {
			raid_io->base_bdev_io_submitted++;
			raid_bdev_io_complete_part(raid_io, 1, SPDK_BDEV_IO_STATUS_SUCCESS);
			continue;
		}
		ret = spdk_bdev_reset(base_info->desc, base_ch,
				      raid_base_bdev_reset_complete, raid_io);
		if (ret == 0) {
			raid_io->base_bdev_io_submitted++;
		} else if (ret == -ENOMEM) {
			/* Out of bdev_io resources: park on the wait queue and
			 * retry from the current index when one frees up. */
			raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
						_raid_bdev_submit_reset_request);
			return;
		} else {
			SPDK_ERRLOG("bdev io submit error not due to ENOMEM, it should not happen\n");
			assert(false);
			raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
			return;
		}
	}
}
|
|
|
|
|
bdev: Not assert but pass completion status to spdk_bdev_io_get_buf_cb
When the specified buffer size to spdk_bdev_io_get_buf() is greater
than the permitted maximum, spdk_bdev_io_get_buf() asserts simply and
doesn't call the specified callback function.
SPDK SCSI library doesn't allocate read buffer and specifies
expected read buffer size, and expects that it is allocated by
spdk_bdev_io_get_buf().
Bdev perf tool also doesn't allocate read buffer and specifies
expected read buffer size, and expects that it is allocated by
spdk_bdev_io_get_buf().
When we support DIF insert and strip in iSCSI target, the read
buffer size iSCSI initiator requests and the read buffer size iSCSI target
requests will become different.
Even after that, iSCSI initiator and iSCSI target will negotiate correctly
not to cause buffer overflow in spdk_bdev_io_get_buf(), but if iSCSI
initiator ignores the result of negotiation, iSCSI initiator can request
read buffer size larger than the permitted maximum, and can cause
failure in iSCSI target. This is very flagile and should be avoided.
This patch do the following
- Add the completion status of spdk_bdev_io_get_buf() to
spdk_bdev_io_get_buf_cb(),
- spdk_bdev_io_get_buf() calls spdk_bdev_io_get_buf_cb() by setting
success to false, and return.
- spdk_bdev_io_get_buf_cb() in each bdev module calls assert if success
is false.
Subsequent patches will process the case that success is false
in spdk_bdev_io_get_buf_cb().
Change-Id: I76429a86e18a69aa085a353ac94743296d270b82
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/c/446045
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
2019-02-25 00:34:28 +00:00
|
|
|
/*
 * brief:
 * Callback function to spdk_bdev_io_get_buf.
 * params:
 * ch - pointer to raid bdev io channel
 * bdev_io - pointer to parent bdev_io on raid bdev device
 * success - True if buffer is allocated or false otherwise.
 * returns:
 * none
 */
static void
raid_bdev_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		     bool success)
{
	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;

	/* Buffer allocation can fail (e.g. requested size too large); fail the
	 * parent raid IO instead of asserting. */
	if (!success) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	/* Buffer is ready: hand the read off to the raid level module. */
	raid_io->raid_bdev->module->submit_rw_request(raid_io);
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_submit_request function is the submit_request function pointer of
 * raid bdev function table. This is used to submit the io on raid_bdev to below
 * layers.
 * params:
 * ch - pointer to raid bdev io channel
 * bdev_io - pointer to parent bdev_io on raid bdev device
 * returns:
 * none
 */
static void
raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);

	/* While the channel is suspended, park the IO; it will be resubmitted
	 * on resume. Otherwise count it as in-flight on this channel. */
	if (raid_ch->is_suspended) {
		TAILQ_INSERT_TAIL(&raid_ch->suspended_ios, raid_io, link);
		return;
	} else {
		raid_ch->num_ios++;
	}

	/* Initialize the per-IO context before dispatching. */
	raid_io->raid_bdev = bdev_io->bdev->ctxt;
	raid_io->raid_ch = raid_ch;
	raid_io->base_bdev_io_remaining = 0;
	raid_io->base_bdev_io_submitted = 0;
	raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		/* Reads need a buffer first; submission continues in the cb. */
		spdk_bdev_io_get_buf(bdev_io, raid_bdev_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		raid_io->raid_bdev->module->submit_rw_request(raid_io);
		break;

	case SPDK_BDEV_IO_TYPE_RESET:
		raid_bdev_submit_reset_request(raid_io);
		break;

	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_UNMAP:
		raid_io->raid_bdev->module->submit_null_payload_request(raid_io);
		break;

	default:
		SPDK_ERRLOG("submit request, invalid io type %u\n", bdev_io->type);
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		break;
	}
}
|
|
|
|
|
2019-01-21 13:34:21 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
2019-02-28 12:16:30 +00:00
|
|
|
* _raid_bdev_io_type_supported checks whether io_type is supported in
|
|
|
|
* all base bdev modules of raid bdev module. If anyone among the base_bdevs
|
|
|
|
* doesn't support, the raid device doesn't supports.
|
|
|
|
*
|
2019-01-21 13:34:21 +00:00
|
|
|
* params:
|
|
|
|
* raid_bdev - pointer to raid bdev context
|
2019-02-28 12:16:30 +00:00
|
|
|
* io_type - io type
|
2019-01-21 13:34:21 +00:00
|
|
|
* returns:
|
|
|
|
* true - io_type is supported
|
|
|
|
* false - io_type is not supported
|
|
|
|
*/
|
2019-02-28 12:16:30 +00:00
|
|
|
inline static bool
|
|
|
|
_raid_bdev_io_type_supported(struct raid_bdev *raid_bdev, enum spdk_bdev_io_type io_type)
|
2019-01-21 13:34:21 +00:00
|
|
|
{
|
2019-11-05 09:32:18 +00:00
|
|
|
struct raid_base_bdev_info *base_info;
|
2019-01-21 13:34:21 +00:00
|
|
|
|
2020-01-28 11:15:10 +00:00
|
|
|
if (io_type == SPDK_BDEV_IO_TYPE_FLUSH ||
|
|
|
|
io_type == SPDK_BDEV_IO_TYPE_UNMAP) {
|
|
|
|
if (raid_bdev->module->submit_null_payload_request == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-05 09:32:18 +00:00
|
|
|
RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
|
|
|
|
if (base_info->bdev == NULL) {
|
2019-01-21 13:34:21 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-11-05 09:32:18 +00:00
|
|
|
if (spdk_bdev_io_type_supported(base_info->bdev, io_type) == false) {
|
2019-01-21 13:34:21 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_io_type_supported is the io_supported function for bdev function
|
|
|
|
* table which returns whether the particular io type is supported or not by
|
|
|
|
* raid bdev module
|
|
|
|
* params:
|
|
|
|
* ctx - pointer to raid bdev context
|
|
|
|
* type - io type
|
|
|
|
* returns:
|
|
|
|
* true - io_type is supported
|
|
|
|
* false - io_type is not supported
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
raid_bdev_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
|
|
|
|
{
|
|
|
|
switch (io_type) {
|
|
|
|
case SPDK_BDEV_IO_TYPE_READ:
|
|
|
|
case SPDK_BDEV_IO_TYPE_WRITE:
|
|
|
|
return true;
|
2019-01-21 13:34:21 +00:00
|
|
|
|
2019-02-28 12:16:30 +00:00
|
|
|
case SPDK_BDEV_IO_TYPE_FLUSH:
|
|
|
|
case SPDK_BDEV_IO_TYPE_RESET:
|
2019-01-21 13:34:21 +00:00
|
|
|
case SPDK_BDEV_IO_TYPE_UNMAP:
|
2019-02-28 12:16:30 +00:00
|
|
|
return _raid_bdev_io_type_supported(ctx, io_type);
|
2019-01-21 13:34:21 +00:00
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * brief:
 * raid_bdev_get_io_channel is the get_io_channel callback of the raid bdev
 * function table; it returns the per-thread io channel for this raid bdev.
 * params:
 * ctxt - pointer to raid_bdev (stored in spdk_bdev->ctxt)
 * returns:
 * pointer to io channel for raid bdev
 */
static struct spdk_io_channel *
raid_bdev_get_io_channel(void *ctxt)
{
	/* The raid_bdev itself is the registered io_device. */
	return spdk_get_io_channel(ctxt);
}
|
|
|
|
|
2022-09-01 13:47:57 +00:00
|
|
|
/*
 * Write the raid bdev's configuration-related fields into an already-open
 * JSON object. Shared between the dump_info_json callback and the RPC layer.
 * Must run on the app thread (asserted) because it reads the base bdev list.
 *
 * raid_bdev - raid bdev to describe
 * w - JSON write context positioned inside an open object
 */
void
raid_bdev_write_info_json(struct raid_bdev *raid_bdev, struct spdk_json_write_ctx *w)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	spdk_json_write_named_uint32(w, "strip_size_kb", raid_bdev->strip_size_kb);
	spdk_json_write_named_string(w, "state", raid_bdev_state_to_str(raid_bdev->state));
	spdk_json_write_named_string(w, "raid_level", raid_bdev_level_to_str(raid_bdev->level));
	spdk_json_write_named_bool(w, "superblock", raid_bdev->superblock_enabled);
	spdk_json_write_named_uint32(w, "num_base_bdevs", raid_bdev->num_base_bdevs);
	spdk_json_write_named_uint32(w, "num_base_bdevs_discovered", raid_bdev->num_base_bdevs_discovered);
	spdk_json_write_name(w, "base_bdevs_list");
	spdk_json_write_array_begin(w);
	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->bdev) {
			spdk_json_write_string(w, base_info->bdev->name);
		} else {
			/* Keep slot positions stable: missing base bdevs are null. */
			spdk_json_write_null(w);
		}
	}
	spdk_json_write_array_end(w);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_dump_info_json is the function table pointer for raid bdev
|
|
|
|
* params:
|
|
|
|
* ctx - pointer to raid_bdev
|
|
|
|
* w - pointer to json context
|
|
|
|
* returns:
|
|
|
|
* 0 - success
|
|
|
|
* non zero - failure
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
raid_bdev_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
|
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev = ctx;
|
|
|
|
|
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_dump_config_json\n");
|
|
|
|
|
|
|
|
/* Dump the raid bdev configuration related information */
|
|
|
|
spdk_json_write_named_object_begin(w, "raid");
|
|
|
|
raid_bdev_write_info_json(raid_bdev, w);
|
2018-05-08 11:30:29 +00:00
|
|
|
spdk_json_write_object_end(w);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-09-20 23:33:32 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_write_config_json is the write_config_json callback for raid
 * bdev; it emits the "bdev_raid_create" RPC call that would recreate this
 * raid bdev on config replay. Must run on the app thread (asserted).
 * params:
 * bdev - pointer to spdk_bdev
 * w - pointer to json context
 * returns:
 * none
 */
static void
raid_bdev_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	struct raid_bdev *raid_bdev = bdev->ctxt;
	struct raid_base_bdev_info *base_info;

	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_raid_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", bdev->name);
	spdk_json_write_named_uint32(w, "strip_size_kb", raid_bdev->strip_size_kb);
	spdk_json_write_named_string(w, "raid_level", raid_bdev_level_to_str(raid_bdev->level));
	spdk_json_write_named_bool(w, "superblock", raid_bdev->superblock_enabled);

	spdk_json_write_named_array_begin(w, "base_bdevs");
	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		/* Unlike write_info_json, absent base bdevs are simply omitted. */
		if (base_info->bdev) {
			spdk_json_write_string(w, base_info->bdev->name);
		}
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
|
|
|
|
|
2021-10-21 07:28:48 +00:00
|
|
|
static int
|
|
|
|
raid_bdev_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
|
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev = ctx;
|
|
|
|
struct spdk_bdev *base_bdev;
|
|
|
|
uint32_t i;
|
2022-09-28 10:06:31 +00:00
|
|
|
int domains_count = 0, rc = 0;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&raid_bdev->mutex);
|
2021-10-21 07:28:48 +00:00
|
|
|
|
2023-04-14 09:15:09 +00:00
|
|
|
if (raid_bdev->module->memory_domains_supported == false) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-10-21 07:28:48 +00:00
|
|
|
/* First loop to get the number of memory domains */
|
|
|
|
for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
|
|
|
|
base_bdev = raid_bdev->base_bdev_info[i].bdev;
|
2022-09-28 10:06:31 +00:00
|
|
|
if (base_bdev == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
2021-10-21 07:28:48 +00:00
|
|
|
rc = spdk_bdev_get_memory_domains(base_bdev, NULL, 0);
|
|
|
|
if (rc < 0) {
|
2022-09-28 10:06:31 +00:00
|
|
|
goto out;
|
2021-10-21 07:28:48 +00:00
|
|
|
}
|
|
|
|
domains_count += rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!domains || array_size < domains_count) {
|
2022-09-28 10:06:31 +00:00
|
|
|
goto out;
|
2021-10-21 07:28:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
|
|
|
|
base_bdev = raid_bdev->base_bdev_info[i].bdev;
|
2022-09-28 10:06:31 +00:00
|
|
|
if (base_bdev == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
2021-10-21 07:28:48 +00:00
|
|
|
rc = spdk_bdev_get_memory_domains(base_bdev, domains, array_size);
|
|
|
|
if (rc < 0) {
|
2022-09-28 10:06:31 +00:00
|
|
|
goto out;
|
2021-10-21 07:28:48 +00:00
|
|
|
}
|
|
|
|
domains += rc;
|
|
|
|
array_size -= rc;
|
|
|
|
}
|
2022-09-28 10:06:31 +00:00
|
|
|
out:
|
|
|
|
pthread_mutex_unlock(&raid_bdev->mutex);
|
|
|
|
|
|
|
|
if (rc < 0) {
|
|
|
|
return rc;
|
|
|
|
}
|
2021-10-21 07:28:48 +00:00
|
|
|
|
|
|
|
return domains_count;
|
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/* g_raid_bdev_fn_table is the function table for raid bdev: the set of
 * callbacks the generic bdev layer invokes on this module's bdevs.
 */
static const struct spdk_bdev_fn_table g_raid_bdev_fn_table = {
	.destruct		= raid_bdev_destruct,
	.submit_request		= raid_bdev_submit_request,
	.io_type_supported	= raid_bdev_io_type_supported,
	.get_io_channel		= raid_bdev_get_io_channel,
	.dump_info_json		= raid_bdev_dump_info_json,
	.write_config_json	= raid_bdev_write_config_json,
	.get_memory_domains	= raid_bdev_get_memory_domains,
};
|
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_bdev *
|
|
|
|
raid_bdev_find_by_name(const char *name)
|
bdev/raid: Change construct_raid_bdev RPC to be consistent to other bdev modules
Change the behavior of construct_raid_bdev RPC to be consistent with
other bdev modules.
Create a config of raid bdev first. Then create a raid bdev based
on the config.
If both succeed, the config and the raid bdev are not removed even if
any failure occur later in construct_raid_bdev RPC. Otherwise, both
are removed and return failure.
During iteration of adding base bdevs in construct_raid_bdev RPC,
- skip any nonexistent bdev and move to the next base bdev.
- if adding any base bdev fails, move to the next bdev.
- if adding base bdevs don't fail but any base bdev doesn't exist,
the raid bdev is in the configuring state and return success.
- if adding base bdev fails, the raid bdev is in the configuring state
and return failure.
- if all adding base bdevs succeed, configure the raid bdev. If configuring
the raid bdev fails, move the raid bdev to the offline state and return
failure. If configuring the raid bdev succeed, return success.
check_and_remove_raid_bdev() becomes unused in raid_bdev_rpc.c but
is still necessary in UT. Hence move this function to UT.
In UT, finding a raid bdev config by name becomes necessary. Hence
factor out the iteration to a function and use the function in UT.
Change-Id: Ifa36967bdc987d97030e3a4e36684cb37b329d4e
Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-on: https://review.gerrithub.io/423622
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Kunal Sablok <kunal.sablok@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2018-08-31 00:58:25 +00:00
|
|
|
{
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_bdev *raid_bdev;
|
2018-07-25 02:12:39 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
|
|
|
|
if (strcmp(raid_bdev->bdev.name, name) == 0) {
|
|
|
|
return raid_bdev;
|
2018-07-25 02:12:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
return NULL;
|
2018-07-25 02:12:39 +00:00
|
|
|
}
|
2019-10-09 11:46:46 +00:00
|
|
|
|
|
|
|
/* Mapping between textual raid level names (as accepted by RPC/JSON) and
 * enum raid_level values. Both the long ("raid0") and short ("0") spellings
 * are accepted. Terminated by a zeroed sentinel entry.
 */
static struct {
	const char *name;
	enum raid_level value;
} g_raid_level_names[] = {
	{ "raid0", RAID0 },
	{ "0", RAID0 },
	{ "raid1", RAID1 },
	{ "1", RAID1 },
	{ "raid5f", RAID5F },
	{ "5f", RAID5F },
	{ "concat", CONCAT },
	{ }
};
|
|
|
|
|
2022-11-03 13:25:30 +00:00
|
|
|
/* Mapping between textual raid bdev state names and enum raid_bdev_state
 * values. Terminated by a zeroed sentinel entry.
 */
static struct {
	const char *name;
	enum raid_bdev_state value;
} g_raid_state_names[] = {
	{ "online", RAID_BDEV_STATE_ONLINE },
	{ "configuring", RAID_BDEV_STATE_CONFIGURING },
	{ "offline", RAID_BDEV_STATE_OFFLINE },
	{ }
};
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
/* We have to use the typedef in the function declaration to appease astyle. */
|
|
|
|
typedef enum raid_level raid_level_t;
|
2022-11-03 13:25:30 +00:00
|
|
|
typedef enum raid_bdev_state raid_bdev_state_t;
|
2022-06-22 21:35:04 +00:00
|
|
|
|
|
|
|
raid_level_t
|
2022-11-03 13:25:30 +00:00
|
|
|
raid_bdev_str_to_level(const char *str)
|
2019-10-09 11:46:46 +00:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
2020-05-14 13:52:08 +00:00
|
|
|
assert(str != NULL);
|
|
|
|
|
2019-10-09 11:46:46 +00:00
|
|
|
for (i = 0; g_raid_level_names[i].name != NULL; i++) {
|
|
|
|
if (strcasecmp(g_raid_level_names[i].name, str) == 0) {
|
|
|
|
return g_raid_level_names[i].value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return INVALID_RAID_LEVEL;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
raid_bdev_level_to_str(enum raid_level level)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; g_raid_level_names[i].name != NULL; i++) {
|
|
|
|
if (g_raid_level_names[i].value == level) {
|
|
|
|
return g_raid_level_names[i].name;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
2022-11-03 13:25:30 +00:00
|
|
|
raid_bdev_state_t
|
|
|
|
raid_bdev_str_to_state(const char *str)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
assert(str != NULL);
|
|
|
|
|
|
|
|
for (i = 0; g_raid_state_names[i].name != NULL; i++) {
|
|
|
|
if (strcasecmp(g_raid_state_names[i].name, str) == 0) {
|
|
|
|
return g_raid_state_names[i].value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return RAID_BDEV_STATE_MAX;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
raid_bdev_state_to_str(enum raid_bdev_state state)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; g_raid_state_names[i].name != NULL; i++) {
|
|
|
|
if (g_raid_state_names[i].value == state) {
|
|
|
|
return g_raid_state_names[i].name;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(false);
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
2018-07-31 20:09:19 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_fini_start is called when bdev layer is starting the
|
|
|
|
* shutdown process
|
|
|
|
* params:
|
|
|
|
* none
|
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
raid_bdev_fini_start(void)
|
|
|
|
{
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_fini_start\n");
|
2018-07-31 20:09:19 +00:00
|
|
|
g_shutdown_started = true;
|
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_exit is called on raid bdev module exit time by bdev layer
|
|
|
|
* params:
|
|
|
|
* none
|
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
raid_bdev_exit(void)
|
|
|
|
{
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_bdev *raid_bdev, *tmp;
|
2022-02-03 15:43:25 +00:00
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_exit\n");
|
2022-08-26 11:02:27 +00:00
|
|
|
|
|
|
|
TAILQ_FOREACH_SAFE(raid_bdev, &g_raid_bdev_list, global_link, tmp) {
|
|
|
|
raid_bdev_cleanup_and_free(raid_bdev);
|
2022-02-03 15:43:25 +00:00
|
|
|
}
|
2018-05-08 11:30:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_get_ctx_size is used to return the context size of bdev_io for raid
|
|
|
|
* module
|
|
|
|
* params:
|
|
|
|
* none
|
|
|
|
* returns:
|
|
|
|
* size of spdk_bdev_io context for raid
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
raid_bdev_get_ctx_size(void)
|
|
|
|
{
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid_bdev_get_ctx_size\n");
|
2018-05-08 11:30:29 +00:00
|
|
|
return sizeof(struct raid_bdev_io);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Module descriptor registered with the generic bdev layer; wires the raid
 * module's lifecycle callbacks (init/fini/examine) into bdev startup and
 * shutdown. Both init and fini are synchronous (async_* = false).
 */
static struct spdk_bdev_module g_raid_if = {
	.name = "raid",
	.module_init = raid_bdev_init,
	.fini_start = raid_bdev_fini_start,
	.module_fini = raid_bdev_exit,
	.get_ctx_size = raid_bdev_get_ctx_size,
	.examine_config = raid_bdev_examine,
	.async_init = false,
	.async_fini = false,
};
SPDK_BDEV_MODULE_REGISTER(raid, &g_raid_if)
|
2018-05-08 11:30:29 +00:00
|
|
|
|
|
|
|
/*
 * brief:
 * raid_bdev_init is the initialization function for raid bdev module
 * params:
 * none
 * returns:
 * 0 - success
 * non zero - failure
 */
static int
raid_bdev_init(void)
{
	/* No global state needs setup; raid bdevs are created on demand via
	 * RPC or examine. */
	return 0;
}
|
|
|
|
|
2018-08-03 08:14:52 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_create allocates raid bdev based on passed configuration
|
|
|
|
* params:
|
2022-08-26 11:02:27 +00:00
|
|
|
* name - name for raid bdev
|
|
|
|
* strip_size - strip size in KB
|
|
|
|
* num_base_bdevs - number of base bdevs
|
|
|
|
* level - raid level
|
|
|
|
* raid_bdev_out - the created raid bdev
|
2018-08-03 08:14:52 +00:00
|
|
|
* returns:
|
|
|
|
* 0 - success
|
|
|
|
* non zero - failure
|
|
|
|
*/
|
2018-08-31 00:25:37 +00:00
|
|
|
int
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev_create(const char *name, uint32_t strip_size, uint8_t num_base_bdevs,
|
2022-11-17 10:40:49 +00:00
|
|
|
enum raid_level level, struct raid_bdev **raid_bdev_out,
|
|
|
|
const struct spdk_uuid *uuid, bool superblock)
|
2018-08-03 08:14:52 +00:00
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev;
|
2018-08-28 06:04:44 +00:00
|
|
|
struct spdk_bdev *raid_bdev_gen;
|
2019-11-07 08:19:19 +00:00
|
|
|
struct raid_bdev_module *module;
|
2022-11-25 12:07:59 +00:00
|
|
|
struct raid_base_bdev_info *base_info;
|
2022-11-28 10:37:12 +00:00
|
|
|
uint8_t min_operational;
|
2022-09-20 12:16:16 +00:00
|
|
|
int rc;
|
2019-11-07 08:19:19 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
if (raid_bdev_find_by_name(name) != NULL) {
|
|
|
|
SPDK_ERRLOG("Duplicate raid bdev name found: %s\n", name);
|
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
|
2022-11-28 14:35:25 +00:00
|
|
|
if (level == RAID1) {
|
|
|
|
if (strip_size != 0) {
|
|
|
|
SPDK_ERRLOG("Strip size is not supported by raid1\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else if (spdk_u32_is_pow2(strip_size) == false) {
|
2022-08-26 11:02:27 +00:00
|
|
|
SPDK_ERRLOG("Invalid strip size %" PRIu32 "\n", strip_size);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
module = raid_bdev_module_find(level);
|
2019-11-07 08:19:19 +00:00
|
|
|
if (module == NULL) {
|
2022-08-26 11:02:27 +00:00
|
|
|
SPDK_ERRLOG("Unsupported raid level '%d'\n", level);
|
2019-11-07 08:19:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(module->base_bdevs_min != 0);
|
2022-08-26 11:02:27 +00:00
|
|
|
if (num_base_bdevs < module->base_bdevs_min) {
|
2019-11-07 08:19:19 +00:00
|
|
|
SPDK_ERRLOG("At least %u base devices required for %s\n",
|
|
|
|
module->base_bdevs_min,
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev_level_to_str(level));
|
2019-11-07 08:19:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2018-08-03 08:14:52 +00:00
|
|
|
|
2022-11-28 10:37:12 +00:00
|
|
|
switch (module->base_bdevs_constraint.type) {
|
|
|
|
case CONSTRAINT_MAX_BASE_BDEVS_REMOVED:
|
|
|
|
min_operational = num_base_bdevs - module->base_bdevs_constraint.value;
|
|
|
|
break;
|
|
|
|
case CONSTRAINT_MIN_BASE_BDEVS_OPERATIONAL:
|
|
|
|
min_operational = module->base_bdevs_constraint.value;
|
|
|
|
break;
|
|
|
|
case CONSTRAINT_UNSET:
|
|
|
|
if (module->base_bdevs_constraint.value != 0) {
|
|
|
|
SPDK_ERRLOG("Unexpected constraint value '%u' provided for raid bdev '%s'.\n",
|
|
|
|
(uint8_t)module->base_bdevs_constraint.value, name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
min_operational = num_base_bdevs;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
SPDK_ERRLOG("Unrecognised constraint type '%u' in module for raid level '%s'.\n",
|
|
|
|
(uint8_t)module->base_bdevs_constraint.type,
|
|
|
|
raid_bdev_level_to_str(module->level));
|
|
|
|
return -EINVAL;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (min_operational == 0 || min_operational > num_base_bdevs) {
|
|
|
|
SPDK_ERRLOG("Wrong constraint value for raid level '%s'.\n",
|
|
|
|
raid_bdev_level_to_str(module->level));
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2018-08-03 08:14:52 +00:00
|
|
|
raid_bdev = calloc(1, sizeof(*raid_bdev));
|
|
|
|
if (!raid_bdev) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate memory for raid bdev\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2019-11-07 08:19:19 +00:00
|
|
|
raid_bdev->module = module;
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev->num_base_bdevs = num_base_bdevs;
|
2018-08-03 08:14:52 +00:00
|
|
|
raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
|
|
|
|
sizeof(struct raid_base_bdev_info));
|
|
|
|
if (!raid_bdev->base_bdev_info) {
|
|
|
|
SPDK_ERRLOG("Unable able to allocate base bdev info\n");
|
|
|
|
free(raid_bdev);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2022-11-25 12:07:59 +00:00
|
|
|
RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
|
|
|
|
base_info->raid_bdev = raid_bdev;
|
|
|
|
}
|
|
|
|
|
2018-12-19 18:04:18 +00:00
|
|
|
/* strip_size_kb is from the rpc param. strip_size is in blocks and used
|
2020-01-23 18:10:42 +00:00
|
|
|
* internally and set later.
|
2018-12-19 18:04:18 +00:00
|
|
|
*/
|
|
|
|
raid_bdev->strip_size = 0;
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev->strip_size_kb = strip_size;
|
2018-08-03 08:14:52 +00:00
|
|
|
raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev->level = level;
|
2022-11-28 10:37:12 +00:00
|
|
|
raid_bdev->min_base_bdevs_operational = min_operational;
|
2022-11-17 10:40:49 +00:00
|
|
|
raid_bdev->superblock_enabled = superblock;
|
2022-09-20 12:16:16 +00:00
|
|
|
TAILQ_INIT(&raid_bdev->suspend_ctx);
|
|
|
|
rc = pthread_mutex_init(&raid_bdev->mutex, NULL);
|
|
|
|
if (rc) {
|
|
|
|
SPDK_ERRLOG("Cannot init mutex for raid bdev\n");
|
|
|
|
free(raid_bdev->base_bdev_info);
|
|
|
|
free(raid_bdev);
|
|
|
|
return rc;
|
|
|
|
}
|
2019-09-05 22:43:26 +00:00
|
|
|
|
2018-08-28 06:04:44 +00:00
|
|
|
raid_bdev_gen = &raid_bdev->bdev;
|
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev_gen->name = strdup(name);
|
2018-08-28 06:04:44 +00:00
|
|
|
if (!raid_bdev_gen->name) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate name for raid\n");
|
2022-09-20 12:16:16 +00:00
|
|
|
pthread_mutex_destroy(&raid_bdev->mutex);
|
2018-08-28 06:04:44 +00:00
|
|
|
free(raid_bdev->base_bdev_info);
|
|
|
|
free(raid_bdev);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2019-04-26 22:30:49 +00:00
|
|
|
raid_bdev_gen->product_name = "Raid Volume";
|
2018-08-28 06:04:44 +00:00
|
|
|
raid_bdev_gen->ctxt = raid_bdev;
|
|
|
|
raid_bdev_gen->fn_table = &g_raid_bdev_fn_table;
|
|
|
|
raid_bdev_gen->module = &g_raid_if;
|
2018-10-23 02:30:12 +00:00
|
|
|
raid_bdev_gen->write_cache = 0;
|
2018-08-28 06:04:44 +00:00
|
|
|
|
2023-03-10 09:05:34 +00:00
|
|
|
if (uuid) {
|
|
|
|
spdk_uuid_copy(&raid_bdev_gen->uuid, uuid);
|
|
|
|
}
|
|
|
|
|
2019-04-08 06:57:04 +00:00
|
|
|
TAILQ_INSERT_TAIL(&g_raid_bdev_list, raid_bdev, global_link);
|
2018-08-03 08:14:52 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
*raid_bdev_out = raid_bdev;
|
2018-08-03 06:54:18 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-10-19 13:02:47 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* Check underlying block devices against support for metadata. Do not configure
|
|
|
|
* md support when parameters from block devices are inconsistent.
|
|
|
|
* params:
|
|
|
|
* raid_bdev - pointer to raid bdev
|
|
|
|
* returns:
|
|
|
|
* 0 - The raid bdev md parameters were successfully configured.
|
|
|
|
* non zero - Failed to configure md.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
raid_bdev_configure_md(struct raid_bdev *raid_bdev)
|
|
|
|
{
|
|
|
|
struct spdk_bdev *base_bdev;
|
|
|
|
uint8_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
|
|
|
|
base_bdev = raid_bdev->base_bdev_info[i].bdev;
|
|
|
|
|
|
|
|
if (i == 0) {
|
|
|
|
raid_bdev->bdev.md_len = spdk_bdev_get_md_size(base_bdev);
|
|
|
|
raid_bdev->bdev.md_interleave = spdk_bdev_is_md_interleaved(base_bdev);
|
|
|
|
raid_bdev->bdev.dif_type = spdk_bdev_get_dif_type(base_bdev);
|
|
|
|
raid_bdev->bdev.dif_is_head_of_md = spdk_bdev_is_dif_head_of_md(base_bdev);
|
|
|
|
raid_bdev->bdev.dif_check_flags = base_bdev->dif_check_flags;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (raid_bdev->bdev.md_len != spdk_bdev_get_md_size(base_bdev) ||
|
|
|
|
raid_bdev->bdev.md_interleave != spdk_bdev_is_md_interleaved(base_bdev) ||
|
|
|
|
raid_bdev->bdev.dif_type != spdk_bdev_get_dif_type(base_bdev) ||
|
|
|
|
raid_bdev->bdev.dif_is_head_of_md != spdk_bdev_is_dif_head_of_md(base_bdev) ||
|
|
|
|
raid_bdev->bdev.dif_check_flags != base_bdev->dif_check_flags) {
|
|
|
|
SPDK_ERRLOG("base bdevs are configured with different metadata formats\n");
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-08-03 08:17:46 +00:00
|
|
|
/*
 * brief:
 * If raid bdev config is complete (all base bdevs discovered), register the
 * raid bdev with the bdev layer and move it to the online state. On failure
 * the bdev stays in (or is rolled back to) the configuring state.
 * params:
 * raid_bdev - pointer to raid bdev
 * returns:
 * 0 - success
 * non zero - failure
 */
static int
raid_bdev_configure(struct raid_bdev *raid_bdev)
{
	uint32_t blocklen = 0;
	struct spdk_bdev *raid_bdev_gen;
	struct raid_base_bdev_info *base_info;
	int rc = 0;

	assert(raid_bdev->state == RAID_BDEV_STATE_CONFIGURING);
	assert(raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		assert(base_info->bdev != NULL);
		/* Check blocklen for all base bdevs that it should be same */
		if (blocklen == 0) {
			blocklen = base_info->bdev->blocklen;
		} else if (blocklen != base_info->bdev->blocklen) {
			/*
			 * Assumption is that all the base bdevs for any raid bdev should
			 * have same blocklen
			 */
			SPDK_ERRLOG("Blocklen of various bdevs not matching\n");
			return -EINVAL;
		}
	}
	assert(blocklen > 0);

	/* The strip_size_kb is read in from user in KB. Convert to blocks here for
	 * internal use.
	 */
	raid_bdev->strip_size = (raid_bdev->strip_size_kb * 1024) / blocklen;
	if (raid_bdev->strip_size == 0 && raid_bdev->level != RAID1) {
		SPDK_ERRLOG("Strip size cannot be smaller than the device block size\n");
		return -EINVAL;
	}
	raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
	raid_bdev->blocklen_shift = spdk_u32log2(blocklen);

	raid_bdev_gen = &raid_bdev->bdev;
	raid_bdev_gen->blocklen = blocklen;

	/* Metadata/DIF parameters must be consistent across base bdevs. */
	rc = raid_bdev_configure_md(raid_bdev);
	if (rc != 0) {
		SPDK_ERRLOG("raid metadata configuration failed\n");
		return rc;
	}

	/* Let the level-specific module compute blockcnt etc. and start up. */
	rc = raid_bdev->module->start(raid_bdev);
	if (rc != 0) {
		SPDK_ERRLOG("raid module startup callback failed\n");
		return rc;
	}
	raid_bdev->state = RAID_BDEV_STATE_ONLINE;
	SPDK_DEBUGLOG(bdev_raid, "io device register %p\n", raid_bdev);
	SPDK_DEBUGLOG(bdev_raid, "blockcnt %" PRIu64 ", blocklen %u\n",
		      raid_bdev_gen->blockcnt, raid_bdev_gen->blocklen);
	spdk_io_device_register(raid_bdev, raid_bdev_create_cb, raid_bdev_destroy_cb,
				sizeof(struct raid_bdev_io_channel),
				raid_bdev->bdev.name);
	rc = spdk_bdev_register(raid_bdev_gen);
	if (rc != 0) {
		/* Roll back: stop the module, unregister the io device and
		 * return to the configuring state. */
		SPDK_ERRLOG("Unable to register raid bdev and stay at configuring state\n");
		if (raid_bdev->module->stop != NULL) {
			raid_bdev->module->stop(raid_bdev);
		}
		spdk_io_device_unregister(raid_bdev, NULL);
		raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
		return rc;
	}
	SPDK_DEBUGLOG(bdev_raid, "raid bdev generic %p\n", raid_bdev_gen);
	SPDK_DEBUGLOG(bdev_raid, "raid bdev is created with name %s, raid_bdev %p\n",
		      raid_bdev_gen->name, raid_bdev);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* If raid bdev is online and registered, change the bdev state to
|
|
|
|
* configuring and unregister this raid device. Queue this raid device
|
|
|
|
* in configuring list
|
|
|
|
* params:
|
|
|
|
* raid_bdev - pointer to raid bdev
|
2019-04-09 00:40:54 +00:00
|
|
|
* cb_fn - callback function
|
|
|
|
* cb_arg - argument to callback function
|
2018-08-03 08:17:46 +00:00
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
2019-04-09 00:40:54 +00:00
|
|
|
raid_bdev_deconfigure(struct raid_bdev *raid_bdev, raid_bdev_destruct_cb cb_fn,
|
|
|
|
void *cb_arg)
|
2018-08-03 08:17:46 +00:00
|
|
|
{
|
|
|
|
if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
|
2019-04-09 00:40:54 +00:00
|
|
|
if (cb_fn) {
|
|
|
|
cb_fn(cb_arg, 0);
|
|
|
|
}
|
2018-08-03 08:17:46 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
|
|
|
|
assert(raid_bdev->num_base_bdevs_discovered);
|
2021-11-25 01:40:58 +00:00
|
|
|
SPDK_DEBUGLOG(bdev_raid, "raid bdev state changing from online to offline\n");
|
2018-08-03 08:17:46 +00:00
|
|
|
|
2019-04-09 00:40:54 +00:00
|
|
|
spdk_bdev_unregister(&raid_bdev->bdev, cb_fn, cb_arg);
|
2018-08-03 08:17:46 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:03:02 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
2022-11-25 12:07:59 +00:00
|
|
|
* raid_bdev_find_base_info_by_bdev function finds the base bdev info by bdev.
|
2019-04-08 23:03:02 +00:00
|
|
|
* params:
|
2022-11-25 12:07:59 +00:00
|
|
|
* base_bdev - pointer to base bdev
|
2019-04-08 23:03:02 +00:00
|
|
|
* returns:
|
2022-11-25 12:07:59 +00:00
|
|
|
* base bdev info if found, otherwise NULL.
|
2019-04-08 23:03:02 +00:00
|
|
|
*/
|
2022-11-25 12:07:59 +00:00
|
|
|
static struct raid_base_bdev_info *
|
|
|
|
raid_bdev_find_base_info_by_bdev(struct spdk_bdev *base_bdev)
|
2019-04-08 23:03:02 +00:00
|
|
|
{
|
2019-11-05 09:32:18 +00:00
|
|
|
struct raid_bdev *raid_bdev;
|
|
|
|
struct raid_base_bdev_info *base_info;
|
2019-04-08 23:03:02 +00:00
|
|
|
|
|
|
|
TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
|
2019-11-05 09:32:18 +00:00
|
|
|
RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
|
|
|
|
if (base_info->bdev == base_bdev) {
|
2022-11-25 12:07:59 +00:00
|
|
|
return base_info;
|
2019-04-08 23:03:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-25 12:07:59 +00:00
|
|
|
return NULL;
|
2019-04-08 23:03:02 +00:00
|
|
|
}
|
|
|
|
|
2022-09-20 12:16:16 +00:00
|
|
|
/* Callback invoked once all io channels of a raid bdev have quiesced. */
typedef void (*raid_bdev_suspended_cb)(struct raid_bdev *raid_bdev, void *ctx);

/* One pending suspend waiter, queued on raid_bdev->suspend_ctx until the
 * suspend completes. */
struct raid_bdev_suspend_ctx {
	raid_bdev_suspended_cb suspended_cb;	/* completion callback */
	void *suspended_cb_ctx;			/* opaque argument for suspended_cb */
	TAILQ_ENTRY(raid_bdev_suspend_ctx) link;
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_on_suspended(struct raid_bdev *raid_bdev)
|
|
|
|
{
|
|
|
|
struct raid_bdev_suspend_ctx *ctx;
|
|
|
|
|
|
|
|
while ((ctx = TAILQ_FIRST(&raid_bdev->suspend_ctx))) {
|
|
|
|
TAILQ_REMOVE(&raid_bdev->suspend_ctx, ctx, link);
|
|
|
|
ctx->suspended_cb(raid_bdev, ctx->suspended_cb_ctx);
|
|
|
|
free(ctx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_inc_suspend_num_channels(void *_raid_bdev)
|
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev = _raid_bdev;
|
|
|
|
|
|
|
|
raid_bdev->suspend_num_channels++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_dec_suspend_num_channels(void *_raid_bdev)
|
|
|
|
{
|
|
|
|
struct raid_bdev *raid_bdev = _raid_bdev;
|
|
|
|
|
|
|
|
if (--raid_bdev->suspend_num_channels == 0) {
|
|
|
|
raid_bdev_on_suspended(raid_bdev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_channel_on_suspended(struct raid_bdev_io_channel *raid_ch)
|
|
|
|
{
|
|
|
|
struct spdk_io_channel *ch = spdk_io_channel_from_ctx(raid_ch);
|
|
|
|
struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(ch);
|
|
|
|
|
|
|
|
spdk_thread_exec_msg(spdk_thread_get_app_thread(), raid_bdev_dec_suspend_num_channels, raid_bdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* spdk_for_each_channel() callback: mark one IO channel of the raid bdev as
 * suspended. Runs on the thread owning the channel. */
static void
raid_bdev_channel_suspend(struct spdk_io_channel_iter *i)
{
	struct raid_bdev *raid_bdev = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);

	SPDK_DEBUGLOG(bdev_raid, "raid_ch: %p\n", raid_ch);

	/* Register this channel with the app thread before it can possibly
	 * report itself as quiesced below. */
	spdk_thread_exec_msg(spdk_thread_get_app_thread(), raid_bdev_inc_suspend_num_channels, raid_bdev);

	raid_ch->is_suspended = true;
	/* No IOs in flight - the channel is already quiesced */
	if (raid_ch->num_ios == 0) {
		raid_bdev_channel_on_suspended(raid_ch);
	}

	spdk_for_each_channel_continue(i, 0);
}
|
|
|
|
|
|
|
|
/* spdk_for_each_channel() completion callback, run on the app thread. Drops
 * the extra channel count taken in raid_bdev_suspend() so a raid bdev with no
 * IO channels still completes its suspension. */
static void
raid_bdev_suspend_continue(struct spdk_io_channel_iter *i, int status)
{
	raid_bdev_dec_suspend_num_channels(spdk_io_channel_iter_get_ctx(i));
}
|
|
|
|
|
|
|
|
/* Suspend IO processing on all channels of the raid bdev.
 *
 * Must be called on the app thread. Increments the suspend count; IO resumes
 * only when raid_bdev_resume() brings the count back to zero. The optional cb
 * is invoked (with cb_ctx) once every channel has quiesced.
 *
 * Returns 0 on success, -ENOMEM if the callback context cannot be allocated.
 */
static int
raid_bdev_suspend(struct raid_bdev *raid_bdev, raid_bdev_suspended_cb cb, void *cb_ctx)
{
	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	/* suspend_cnt is also read outside the app thread, hence the lock */
	pthread_mutex_lock(&raid_bdev->mutex);
	raid_bdev->suspend_cnt++;
	pthread_mutex_unlock(&raid_bdev->mutex);

	/* Already suspended and no channel iteration in flight - the caller
	 * can be notified immediately. */
	if (raid_bdev->suspend_cnt > 1 && raid_bdev->suspend_num_channels == 0) {
		if (cb != NULL) {
			cb(raid_bdev, cb_ctx);
		}
		return 0;
	}

	if (cb != NULL) {
		struct raid_bdev_suspend_ctx *ctx;

		ctx = malloc(sizeof(*ctx));
		if (ctx == NULL) {
			return -ENOMEM;
		}
		ctx->suspended_cb = cb;
		ctx->suspended_cb_ctx = cb_ctx;
		TAILQ_INSERT_TAIL(&raid_bdev->suspend_ctx, ctx, link);
	}

	/* decremented in raid_bdev_suspend_continue() - in case there are no IO channels */
	raid_bdev_inc_suspend_num_channels(raid_bdev);

	spdk_for_each_channel(raid_bdev, raid_bdev_channel_suspend, raid_bdev,
			      raid_bdev_suspend_continue);

	return 0;
}
|
|
|
|
|
|
|
|
/* spdk_for_each_channel() callback: clear the suspended flag on one IO
 * channel and resubmit the IOs that were queued while it was suspended.
 * Runs on the thread owning the channel. */
static void
raid_bdev_channel_resume(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);
	struct raid_bdev_io *raid_io;

	SPDK_DEBUGLOG(bdev_raid, "raid_ch: %p\n", raid_ch);

	raid_ch->is_suspended = false;

	/* Replay the IOs that arrived during the suspension, in arrival order */
	while ((raid_io = TAILQ_FIRST(&raid_ch->suspended_ios))) {
		TAILQ_REMOVE(&raid_ch->suspended_ios, raid_io, link);
		raid_bdev_submit_request(spdk_io_channel_from_ctx(raid_ch),
					 spdk_bdev_io_from_ctx(raid_io));
	}

	spdk_for_each_channel_continue(i, 0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_resume(struct raid_bdev *raid_bdev)
|
|
|
|
{
|
|
|
|
assert(spdk_get_thread() == spdk_thread_get_app_thread());
|
|
|
|
assert(raid_bdev->suspend_cnt > 0);
|
|
|
|
|
|
|
|
pthread_mutex_lock(&raid_bdev->mutex);
|
|
|
|
raid_bdev->suspend_cnt--;
|
|
|
|
pthread_mutex_unlock(&raid_bdev->mutex);
|
|
|
|
|
|
|
|
if (raid_bdev->suspend_cnt == 0) {
|
|
|
|
spdk_for_each_channel(raid_bdev, raid_bdev_channel_resume, raid_bdev, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-28 10:06:31 +00:00
|
|
|
static void
|
|
|
|
raid_bdev_channel_remove_base_bdev(struct spdk_io_channel_iter *i)
|
|
|
|
{
|
|
|
|
struct raid_base_bdev_info *base_info = spdk_io_channel_iter_get_ctx(i);
|
|
|
|
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
|
|
|
|
struct raid_bdev_io_channel *raid_ch = spdk_io_channel_get_ctx(ch);
|
|
|
|
uint8_t idx = base_info - base_info->raid_bdev->base_bdev_info;
|
|
|
|
|
|
|
|
SPDK_DEBUGLOG(bdev_raid, "slot: %u raid_ch: %p\n", idx, raid_ch);
|
|
|
|
|
|
|
|
if (raid_ch->base_channel[idx] != NULL) {
|
|
|
|
spdk_put_io_channel(raid_ch->base_channel[idx]);
|
|
|
|
raid_ch->base_channel[idx] = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_for_each_channel_continue(i, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
raid_bdev_remove_base_bdev_done(struct spdk_io_channel_iter *i, int status)
|
|
|
|
{
|
|
|
|
struct raid_base_bdev_info *base_info = spdk_io_channel_iter_get_ctx(i);
|
|
|
|
|
|
|
|
raid_bdev_resume(base_info->raid_bdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Suspended-state callback for base bdev removal: with IO quiesced, release
 * the base bdev's resources, then drop its IO channel reference on every raid
 * channel. IO stays suspended until raid_bdev_remove_base_bdev_done(). */
static void
raid_bdev_remove_base_bdev_on_suspended(struct raid_bdev *raid_bdev, void *ctx)
{
	struct raid_base_bdev_info *base_info = ctx;

	pthread_mutex_lock(&raid_bdev->mutex);
	base_info->remove_scheduled = false;
	raid_bdev_free_base_bdev_resource(base_info);
	pthread_mutex_unlock(&raid_bdev->mutex);

	spdk_for_each_channel(raid_bdev, raid_bdev_channel_remove_base_bdev, base_info,
			      raid_bdev_remove_base_bdev_done);
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_remove_base_bdev function is called by below layers when base_bdev
 * is removed. This function checks if this base bdev is part of any raid bdev
 * or not. If yes, it takes necessary action on that particular raid bdev.
 * params:
 * base_bdev - pointer to base bdev which got removed
 * returns:
 * 0 - success
 * non zero - failure
 */
int
raid_bdev_remove_base_bdev(struct spdk_bdev *base_bdev)
{
	struct raid_bdev *raid_bdev;
	struct raid_base_bdev_info *base_info;

	SPDK_DEBUGLOG(bdev_raid, "%s\n", base_bdev->name);

	/* Find the raid_bdev which has claimed this base_bdev */
	base_info = raid_bdev_find_base_info_by_bdev(base_bdev);
	if (!base_info) {
		SPDK_ERRLOG("bdev to remove '%s' not found\n", base_bdev->name);
		return -ENODEV;
	}
	raid_bdev = base_info->raid_bdev;

	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	/* Removal already in progress - nothing more to do */
	if (base_info->remove_scheduled) {
		return 0;
	}

	assert(base_info->desc);
	base_info->remove_scheduled = true;

	if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
		/*
		 * As raid bdev is not registered yet or already unregistered,
		 * so cleanup should be done here itself.
		 */
		raid_bdev_free_base_bdev_resource(base_info);
		if (raid_bdev->num_base_bdevs_discovered == 0) {
			/* There is no base bdev for this raid, so free the raid device. */
			raid_bdev_cleanup_and_free(raid_bdev);
		}
	} else if (raid_bdev->num_base_bdevs_discovered == raid_bdev->min_base_bdevs_operational) {
		/* Losing one more base bdev would go below the operational
		 * minimum - take the whole raid bdev down. */
		raid_bdev_deconfigure(raid_bdev, NULL, NULL);
	} else {
		/* Quiesce IO first; the removal completes in
		 * raid_bdev_remove_base_bdev_on_suspended(). */
		return raid_bdev_suspend(raid_bdev, raid_bdev_remove_base_bdev_on_suspended, base_info);
	}

	return 0;
}
|
|
|
|
|
2023-01-11 07:41:24 +00:00
|
|
|
/*
 * brief:
 * raid_bdev_resize_base_bdev function is called by below layers when base_bdev
 * is resized. This function checks if the smallest size of the base_bdevs is changed.
 * If yes, call module handler to resize the raid_bdev if implemented.
 * params:
 * base_bdev - pointer to base bdev which got resized.
 * returns:
 * none
 */
static void
raid_bdev_resize_base_bdev(struct spdk_bdev *base_bdev)
{
	struct raid_bdev *raid_bdev;
	struct raid_base_bdev_info *base_info;

	SPDK_DEBUGLOG(bdev_raid, "raid_bdev_resize_base_bdev\n");

	base_info = raid_bdev_find_base_info_by_bdev(base_bdev);

	/* Find the raid_bdev which has claimed this base_bdev */
	if (!base_info) {
		SPDK_ERRLOG("raid_bdev whose base_bdev '%s' not found\n", base_bdev->name);
		return;
	}
	raid_bdev = base_info->raid_bdev;

	assert(spdk_get_thread() == spdk_thread_get_app_thread());

	/* base_info->blockcnt still holds the pre-resize block count here;
	 * presumably the module's resize handler picks up the new size -
	 * TODO confirm against the module implementations. */
	SPDK_NOTICELOG("base_bdev '%s' was resized: old size %" PRIu64 ", new size %" PRIu64 "\n",
		       base_bdev->name, base_info->blockcnt, base_bdev->blockcnt);

	/* Resize support is optional per raid module */
	if (raid_bdev->module->resize) {
		raid_bdev->module->resize(raid_bdev);
	}
}
|
|
|
|
|
2020-10-11 09:34:21 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_event_base_bdev function is called by below layers when base_bdev
|
|
|
|
* triggers asynchronous event.
|
|
|
|
* params:
|
|
|
|
* type - event details.
|
|
|
|
* bdev - bdev that triggered event.
|
|
|
|
* event_ctx - context for event.
|
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
raid_bdev_event_base_bdev(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
|
|
|
|
void *event_ctx)
|
|
|
|
{
|
2022-09-28 10:06:31 +00:00
|
|
|
int rc;
|
|
|
|
|
2020-10-11 09:34:21 +00:00
|
|
|
switch (type) {
|
|
|
|
case SPDK_BDEV_EVENT_REMOVE:
|
2022-09-28 10:06:31 +00:00
|
|
|
rc = raid_bdev_remove_base_bdev(bdev);
|
|
|
|
if (rc != 0) {
|
|
|
|
SPDK_ERRLOG("Failed to remove base bdev %s: %s\n",
|
|
|
|
spdk_bdev_get_name(bdev), spdk_strerror(-rc));
|
|
|
|
}
|
2020-10-11 09:34:21 +00:00
|
|
|
break;
|
2023-01-11 07:41:24 +00:00
|
|
|
case SPDK_BDEV_EVENT_RESIZE:
|
|
|
|
raid_bdev_resize_base_bdev(bdev);
|
|
|
|
break;
|
2020-10-11 09:34:21 +00:00
|
|
|
default:
|
|
|
|
SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-08 23:19:41 +00:00
|
|
|
/*
 * brief:
 * Deletes the specified raid bdev
 * params:
 * raid_bdev - pointer to raid bdev
 * cb_fn - callback function
 * cb_arg - argument to callback function
 */
void
raid_bdev_delete(struct raid_bdev *raid_bdev, raid_bdev_destruct_cb cb_fn, void *cb_arg)
{
	struct raid_base_bdev_info *base_info;

	SPDK_DEBUGLOG(bdev_raid, "delete raid bdev: %s\n", raid_bdev->bdev.name);

	/* A delete may already be in flight - report -EALREADY to this caller */
	if (raid_bdev->destroy_started) {
		SPDK_DEBUGLOG(bdev_raid, "destroying raid bdev %s is already started\n",
			      raid_bdev->bdev.name);
		if (cb_fn) {
			cb_fn(cb_arg, -EALREADY);
		}
		return;
	}

	raid_bdev->destroy_started = true;

	/* Schedule removal of every base bdev; offline ones are cleaned up
	 * immediately, online ones during deconfigure below. */
	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		base_info->remove_scheduled = true;

		if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
			/*
			 * As raid bdev is not registered yet or already unregistered,
			 * so cleanup should be done here itself.
			 */
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}

	if (raid_bdev->num_base_bdevs_discovered == 0) {
		/* There is no base bdev for this raid, so free the raid device. */
		raid_bdev_cleanup_and_free(raid_bdev);
		if (cb_fn) {
			cb_fn(cb_arg, 0);
		}
	} else {
		raid_bdev_deconfigure(raid_bdev, cb_fn, cb_arg);
	}
}
|
|
|
|
|
2018-08-31 06:27:29 +00:00
|
|
|
static int
|
2022-11-25 12:07:59 +00:00
|
|
|
raid_bdev_configure_base_bdev(struct raid_base_bdev_info *base_info)
|
2018-05-08 11:30:29 +00:00
|
|
|
{
|
2022-11-25 12:07:59 +00:00
|
|
|
struct raid_bdev *raid_bdev = base_info->raid_bdev;
|
2022-08-26 11:02:27 +00:00
|
|
|
struct spdk_bdev_desc *desc;
|
|
|
|
struct spdk_bdev *bdev;
|
|
|
|
int rc;
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2022-09-13 10:34:53 +00:00
|
|
|
assert(spdk_get_thread() == spdk_thread_get_app_thread());
|
2022-08-26 11:02:27 +00:00
|
|
|
assert(base_info->name != NULL);
|
|
|
|
assert(base_info->bdev == NULL);
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
rc = spdk_bdev_open_ext(base_info->name, true, raid_bdev_event_base_bdev, NULL, &desc);
|
2018-08-03 06:54:18 +00:00
|
|
|
if (rc != 0) {
|
2020-10-11 10:42:46 +00:00
|
|
|
if (rc != -ENODEV) {
|
2022-08-26 11:02:27 +00:00
|
|
|
SPDK_ERRLOG("Unable to create desc on bdev '%s'\n", base_info->name);
|
2020-10-11 10:42:46 +00:00
|
|
|
}
|
2018-08-28 00:56:22 +00:00
|
|
|
return rc;
|
2018-08-03 06:54:18 +00:00
|
|
|
}
|
2018-05-08 11:30:29 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
bdev = spdk_bdev_desc_get_bdev(desc);
|
|
|
|
|
|
|
|
rc = spdk_bdev_module_claim_bdev(bdev, NULL, &g_raid_if);
|
|
|
|
if (rc != 0) {
|
|
|
|
SPDK_ERRLOG("Unable to claim this bdev as it is already claimed\n");
|
|
|
|
spdk_bdev_close(desc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
SPDK_DEBUGLOG(bdev_raid, "bdev %s is claimed\n", bdev->name);
|
|
|
|
|
|
|
|
assert(raid_bdev->state != RAID_BDEV_STATE_ONLINE);
|
|
|
|
|
|
|
|
base_info->bdev = bdev;
|
|
|
|
base_info->desc = desc;
|
2023-01-11 07:41:24 +00:00
|
|
|
base_info->blockcnt = bdev->blockcnt;
|
2022-11-17 10:40:49 +00:00
|
|
|
base_info->data_offset = 0;
|
|
|
|
base_info->data_size = base_info->bdev->blockcnt;
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev->num_base_bdevs_discovered++;
|
2018-05-08 11:30:29 +00:00
|
|
|
assert(raid_bdev->num_base_bdevs_discovered <= raid_bdev->num_base_bdevs);
|
|
|
|
|
2022-11-17 10:40:49 +00:00
|
|
|
if (raid_bdev->superblock_enabled) {
|
|
|
|
assert((RAID_BDEV_MIN_DATA_OFFSET_SIZE % bdev->blocklen) == 0);
|
|
|
|
base_info->data_offset = RAID_BDEV_MIN_DATA_OFFSET_SIZE / bdev->blocklen;
|
|
|
|
|
|
|
|
if (bdev->optimal_io_boundary) {
|
|
|
|
base_info->data_offset = spdk_divide_round_up(base_info->data_offset,
|
|
|
|
bdev->optimal_io_boundary) * bdev->optimal_io_boundary;
|
|
|
|
}
|
|
|
|
|
|
|
|
base_info->data_size = base_info->bdev->blockcnt - base_info->data_offset;
|
|
|
|
|
|
|
|
if (base_info->data_offset > bdev->blockcnt) {
|
|
|
|
SPDK_ERRLOG("Data offset %lu exceeds base bdev capacity %lu on bdev '%s'\n",
|
|
|
|
base_info->data_offset, bdev->blockcnt, base_info->name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
if (raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs) {
|
2018-08-03 08:17:46 +00:00
|
|
|
rc = raid_bdev_configure(raid_bdev);
|
|
|
|
if (rc != 0) {
|
|
|
|
SPDK_ERRLOG("Failed to configure raid bdev\n");
|
2018-08-28 00:56:22 +00:00
|
|
|
return rc;
|
2018-05-08 11:30:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-08-31 06:27:29 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
2022-08-26 11:02:27 +00:00
|
|
|
* raid_bdev_add_base_device function is the actual function which either adds
|
|
|
|
* the nvme base device to existing raid bdev or create a new raid bdev. It also claims
|
|
|
|
* the base device and keep the open descriptor.
|
2018-08-31 06:27:29 +00:00
|
|
|
* params:
|
2022-08-26 11:02:27 +00:00
|
|
|
* raid_bdev - pointer to raid bdev
|
|
|
|
* name - name of the base bdev
|
|
|
|
* slot - position to add base bdev
|
2018-08-31 06:27:29 +00:00
|
|
|
* returns:
|
2022-08-26 11:02:27 +00:00
|
|
|
* 0 - success
|
|
|
|
* non zero - failure
|
2018-08-31 06:27:29 +00:00
|
|
|
*/
|
|
|
|
int
|
2022-08-26 11:02:27 +00:00
|
|
|
raid_bdev_add_base_device(struct raid_bdev *raid_bdev, const char *name, uint8_t slot)
|
2018-08-31 06:27:29 +00:00
|
|
|
{
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_base_bdev_info *base_info;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (slot >= raid_bdev->num_base_bdevs) {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
base_info = &raid_bdev->base_bdev_info[slot];
|
|
|
|
|
|
|
|
if (base_info->name != NULL) {
|
|
|
|
SPDK_ERRLOG("Slot %u on raid bdev '%s' already assigned to bdev '%s'\n",
|
|
|
|
slot, raid_bdev->bdev.name, base_info->name);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
base_info->name = strdup(name);
|
|
|
|
if (base_info->name == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2022-11-25 12:07:59 +00:00
|
|
|
rc = raid_bdev_configure_base_bdev(base_info);
|
2022-08-26 11:02:27 +00:00
|
|
|
if (rc != 0) {
|
|
|
|
if (rc != -ENODEV) {
|
|
|
|
SPDK_ERRLOG("Failed to allocate resource for bdev '%s'\n", name);
|
2018-08-31 06:27:29 +00:00
|
|
|
}
|
2022-08-26 11:02:27 +00:00
|
|
|
return rc;
|
2018-08-31 06:27:29 +00:00
|
|
|
}
|
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
return 0;
|
2018-08-31 06:27:29 +00:00
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
/*
|
|
|
|
* brief:
|
|
|
|
* raid_bdev_examine function is the examine function call by the below layers
|
|
|
|
* like bdev_nvme layer. This function will check if this base bdev can be
|
|
|
|
* claimed by this raid bdev or not.
|
|
|
|
* params:
|
|
|
|
* bdev - pointer to base bdev
|
|
|
|
* returns:
|
|
|
|
* none
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
raid_bdev_examine(struct spdk_bdev *bdev)
|
|
|
|
{
|
2022-08-26 11:02:27 +00:00
|
|
|
struct raid_bdev *raid_bdev;
|
|
|
|
struct raid_base_bdev_info *base_info;
|
2018-08-28 01:13:38 +00:00
|
|
|
|
2022-08-26 11:02:27 +00:00
|
|
|
TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
|
|
|
|
RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
|
2022-09-28 10:06:31 +00:00
|
|
|
if (base_info->bdev == NULL && base_info->name != NULL &&
|
|
|
|
strcmp(bdev->name, base_info->name) == 0) {
|
2022-11-25 12:07:59 +00:00
|
|
|
raid_bdev_configure_base_bdev(base_info);
|
2022-08-26 11:02:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2018-08-28 01:13:38 +00:00
|
|
|
}
|
|
|
|
|
2018-05-08 11:30:29 +00:00
|
|
|
spdk_bdev_module_examine_done(&g_raid_if);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Log component for bdev raid bdev module */
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_LOG_REGISTER_COMPONENT(bdev_raid)
|