blobstore: implement spdk_bs_grow and bdev_lvol_grow_lvstore RPC

The bdev_lvol_grow_lvstore RPC grows the lvstore size if the underlying
bdev size has been increased. It invokes spdk_bs_grow internally.
spdk_bs_grow extends the used_clusters bitmap; if there is not enough
space reserved for the used_clusters bitmap, the API fails. The
reserved space is determined by the num_md_pages given at blobstore
creation time.
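
For orientation, here is a minimal sketch of how a caller might drive the new API once the backing device has been resized. Only spdk_bs_grow, spdk_bs_opts_init, spdk_bs_free_cluster_count and the spdk_bs_op_with_handle_complete callback shape are real SPDK symbols (the first declared in this patch); grow_done and grow_blobstore are hypothetical names and the error handling is illustrative only.

~~~c
#include "spdk/blob.h"
#include "spdk/log.h"

/* Hypothetical completion callback for spdk_bs_grow. */
static void
grow_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("spdk_bs_grow failed: %d\n", bserrno);
		return;
	}
	SPDK_NOTICELOG("blobstore grown, free clusters: %" PRIu64 "\n",
		       spdk_bs_free_cluster_count(bs));
}

/* Hypothetical caller: bs_dev is assumed to already wrap the enlarged bdev. */
static void
grow_blobstore(struct spdk_bs_dev *bs_dev)
{
	struct spdk_bs_opts opts;

	spdk_bs_opts_init(&opts, sizeof(opts));
	/* Fails if the md pages reserved at blobstore creation time cannot
	 * hold the enlarged used_clusters mask. */
	spdk_bs_grow(bs_dev, &opts, grow_done, NULL);
}
~~~

Internally (see bs_load_try_to_grow below), the required mask size is computed as spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + spdk_divide_round_up(total_clusters, 8), SPDK_BS_PAGE_SIZE) pages, and the grow is skipped when that exceeds the gap between used_cluster_mask_start and used_blobid_mask_start in the super block.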

Signed-off-by: Peng Yu <yupeng0921@gmail.com>
Change-Id: If6e8c0794dbe4eaa7042acf5031de58138ce7bca
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9730
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
yupeng 2021-10-05 03:28:38 +00:00 committed by Ben Walker
parent 88833020eb
commit 1f0b8df7b0
15 changed files with 864 additions and 3 deletions


@ -8024,6 +8024,44 @@ Example response:
}
~~~
### bdev_lvol_grow_lvstore {#rpc_bdev_lvol_grow_lvstore}
Grow the logical volume store to fill the underlying bdev.
#### Parameters
Name | Optional | Type | Description
----------------------- | -------- | ----------- | -----------
uuid | Optional | string | UUID of the logical volume store to grow
lvs_name | Optional | string | Name of the logical volume store to grow
Either uuid or lvs_name must be specified, but not both.
#### Example
Example request:
~~~json
{
"jsonrpc": "2.0",
"method": "bdev_lvol_grow_lvstore",
"id": 1
"params": {
"uuid": "a9959197-b5e2-4f2d-8095-251ffb6985a5"
}
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": true
}
~~~
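
Example request selecting the lvstore by name instead of UUID (the lvs_name value here is only a placeholder):
~~~json
{
  "jsonrpc": "2.0",
  "method": "bdev_lvol_grow_lvstore",
  "id": 1,
  "params": {
    "lvs_name": "lvs_test"
  }
}
~~~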
### bdev_lvol_create {#rpc_bdev_lvol_create}
Create a logical volume on a logical volume store.


@ -257,6 +257,17 @@ void spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size);
void spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg);
/**
* Grow a blobstore to fill the underlying device.
*
* \param dev Blobstore block device.
* \param opts The structure which contains the option values for the blobstore.
* \param cb_fn Called when the grow operation is complete.
* \param cb_arg Argument passed to function cb_fn.
*/
void spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg);
/**
* Initialize a blobstore on the given device.
*


@ -239,6 +239,16 @@ struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol);
void spdk_lvs_load(struct spdk_bs_dev *bs_dev, spdk_lvs_op_with_handle_complete cb_fn,
void *cb_arg);
/**
* Grow an lvstore to fill the underlying device.
*
* \param bs_dev Pointer to the blobstore device.
* \param cb_fn Completion callback.
* \param cb_arg Completion callback custom arguments.
*/
void spdk_lvs_grow(struct spdk_bs_dev *bs_dev, spdk_lvs_op_with_handle_complete cb_fn,
void *cb_arg);
/**
* Open a lvol.
*


@ -25,6 +25,15 @@ struct spdk_lvs_req {
int lvserrno;
};
struct spdk_lvs_grow_req {
spdk_lvs_op_complete cb_fn;
void *cb_arg;
struct spdk_lvol_store *lvol_store;
struct lvol_store_bdev *lvs_bdev;
int lvserrno;
int lvol_cnt;
};
struct spdk_lvol_req {
spdk_lvol_op_complete cb_fn;
void *cb_arg;


@ -6,7 +6,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 7
SO_VER := 8
SO_MINOR := 0
C_SRCS = blobstore.c request.c zeroes.c blob_bs_dev.c


@ -3828,8 +3828,15 @@ bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
struct spdk_blob_md_page) * 8));
/* The length of the mask must be exactly equal to the total number of clusters */
assert(ctx->mask->length == ctx->bs->total_clusters);
/*
* The length of the mask must be equal to or larger than the total number of clusters. It may be
* larger than the total number of clusters due to a failed spdk_bs_grow.
*/
assert(ctx->mask->length >= ctx->bs->total_clusters);
if (ctx->mask->length > ctx->bs->total_clusters) {
SPDK_WARNLOG("Shrink the used_custers mask length to total_clusters");
ctx->mask->length = ctx->bs->total_clusters;
}
rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
if (rc < 0) {
@ -8043,4 +8050,255 @@ spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_
return 0;
}
static void
bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
{
int rc;
if (ctx->super->size == 0) {
ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
}
if (ctx->super->io_unit_size == 0) {
ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
}
/* Parse the super block */
ctx->bs->clean = 1;
ctx->bs->cluster_sz = ctx->super->cluster_size;
ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
}
ctx->bs->io_unit_size = ctx->super->io_unit_size;
rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
if (rc < 0) {
bs_load_ctx_fail(ctx, -ENOMEM);
return;
}
ctx->bs->md_start = ctx->super->md_start;
ctx->bs->md_len = ctx->super->md_len;
rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
if (rc < 0) {
bs_load_ctx_fail(ctx, -ENOMEM);
return;
}
ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
ctx->bs->super_blob = ctx->super->super_blob;
memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
SPDK_ERRLOG("Can not grow an unclean blobstore, please load it normally to clean it.\n");
bs_load_ctx_fail(ctx, -EIO);
return;
} else {
bs_load_read_used_pages(ctx);
}
}
static void
bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_bs_load_ctx *ctx = cb_arg;
if (bserrno != 0) {
bs_load_ctx_fail(ctx, bserrno);
return;
}
bs_load_grow_continue(ctx);
}
static void
bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_bs_load_ctx *ctx = cb_arg;
if (bserrno != 0) {
bs_load_ctx_fail(ctx, bserrno);
return;
}
spdk_free(ctx->mask);
bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
bs_load_grow_super_write_cpl, ctx);
}
static void
bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_bs_load_ctx *ctx = cb_arg;
uint64_t lba, lba_count;
uint64_t dev_size;
uint64_t total_clusters;
if (bserrno != 0) {
bs_load_ctx_fail(ctx, bserrno);
return;
}
/* The type must be correct */
assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
struct spdk_blob_md_page) * 8));
dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
total_clusters = dev_size / ctx->super->cluster_size;
ctx->mask->length = total_clusters;
lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
bs_load_grow_used_clusters_write_cpl, ctx);
}
static void
bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
{
uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
uint64_t lba, lba_count, mask_size;
dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
total_clusters = dev_size / ctx->super->cluster_size;
used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
spdk_divide_round_up(total_clusters, 8),
SPDK_BS_PAGE_SIZE);
max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
/* Not necessary to grow, or no space to grow */
if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
SPDK_DEBUGLOG(blob, "No grow\n");
bs_load_grow_continue(ctx);
return;
}
SPDK_DEBUGLOG(blob, "Resize blobstore\n");
ctx->super->size = dev_size;
ctx->super->used_cluster_mask_len = used_cluster_mask_len;
ctx->super->crc = blob_md_page_calc_crc(ctx->super);
mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
SPDK_MALLOC_DMA);
if (!ctx->mask) {
bs_load_ctx_fail(ctx, -ENOMEM);
return;
}
lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
bs_load_grow_used_clusters_read_cpl, ctx);
}
static void
bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_bs_load_ctx *ctx = cb_arg;
uint32_t crc;
static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
if (ctx->super->version > SPDK_BS_VERSION ||
ctx->super->version < SPDK_BS_INITIAL_VERSION) {
bs_load_ctx_fail(ctx, -EILSEQ);
return;
}
if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
sizeof(ctx->super->signature)) != 0) {
bs_load_ctx_fail(ctx, -EILSEQ);
return;
}
crc = blob_md_page_calc_crc(ctx->super);
if (crc != ctx->super->crc) {
bs_load_ctx_fail(ctx, -EILSEQ);
return;
}
if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n");
} else {
SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
SPDK_LOGDUMP(blob, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
SPDK_LOGDUMP(blob, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
bs_load_ctx_fail(ctx, -ENXIO);
return;
}
if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
bs_load_ctx_fail(ctx, -EILSEQ);
return;
}
bs_load_try_to_grow(ctx);
}
void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
struct spdk_blob_store *bs;
struct spdk_bs_cpl cpl;
struct spdk_bs_load_ctx *ctx;
struct spdk_bs_opts opts = {};
int err;
SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
dev->destroy(dev);
cb_fn(cb_arg, NULL, -EINVAL);
return;
}
spdk_bs_opts_init(&opts, sizeof(opts));
if (o) {
if (bs_opts_copy(o, &opts)) {
return;
}
}
if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
dev->destroy(dev);
cb_fn(cb_arg, NULL, -EINVAL);
return;
}
err = bs_alloc(dev, &opts, &bs, &ctx);
if (err) {
dev->destroy(dev);
cb_fn(cb_arg, NULL, err);
return;
}
cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
cpl.u.bs_handle.cb_fn = cb_fn;
cpl.u.bs_handle.cb_arg = cb_arg;
cpl.u.bs_handle.bs = bs;
ctx->seq = bs_sequence_start(bs->md_channel, &cpl);
if (!ctx->seq) {
spdk_free(ctx->super);
free(ctx);
bs_free(bs);
cb_fn(cb_arg, NULL, -ENOMEM);
return;
}
/* Read the super block */
bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
bs_byte_to_lba(bs, sizeof(*ctx->super)),
bs_grow_load_super_cpl, ctx);
}
SPDK_LOG_REGISTER_COMPONENT(blob)


@ -15,6 +15,7 @@
spdk_bs_get_io_unit_size;
spdk_bs_free_cluster_count;
spdk_bs_total_data_cluster_count;
spdk_bs_grow;
spdk_blob_get_id;
spdk_blob_get_num_pages;
spdk_blob_get_num_io_units;


@ -1488,3 +1488,34 @@ spdk_lvol_decouple_parent(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, v
spdk_bs_blob_decouple_parent(lvol->lvol_store->blobstore, req->channel, blob_id,
lvol_inflate_cb, req);
}
void
spdk_lvs_grow(struct spdk_bs_dev *bs_dev, spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
{
struct spdk_lvs_with_handle_req *req;
struct spdk_bs_opts opts = {};
assert(cb_fn != NULL);
if (bs_dev == NULL) {
SPDK_ERRLOG("Blobstore device does not exist\n");
cb_fn(cb_arg, NULL, -ENODEV);
return;
}
req = calloc(1, sizeof(*req));
if (req == NULL) {
SPDK_ERRLOG("Cannot alloc memory for request structure\n");
cb_fn(cb_arg, NULL, -ENOMEM);
return;
}
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
req->bs_dev = bs_dev;
lvs_bs_opts_init(&opts);
snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "LVOLSTORE");
spdk_bs_grow(bs_dev, &opts, lvs_load_cb, req);
}


@ -7,6 +7,7 @@
spdk_lvs_rename;
spdk_lvs_unload;
spdk_lvs_destroy;
spdk_lvs_grow;
spdk_lvol_create;
spdk_lvol_create_snapshot;
spdk_lvol_create_clone;


@ -1461,4 +1461,213 @@ vbdev_lvol_get_from_bdev(struct spdk_bdev *bdev)
return (struct spdk_lvol *)bdev->ctxt;
}
static void
_vbdev_lvs_grow_finish(void *arg, int lvserrno)
{
struct spdk_lvs_grow_req *req = arg;
req->cb_fn(req->cb_arg, req->lvserrno);
free(req);
}
static void
_vbdev_lvs_grow_examine_finish(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno)
{
struct spdk_lvs_grow_req *req = cb_arg;
struct spdk_lvol_store *lvs = req->lvol_store;
if (lvolerrno != 0) {
SPDK_ERRLOG("Error opening lvol %s\n", lvol->unique_id);
TAILQ_REMOVE(&lvs->lvols, lvol, link);
assert(lvs->lvol_count > 0);
lvs->lvol_count--;
free(lvol);
goto end;
}
if (_create_lvol_disk(lvol, false)) {
SPDK_ERRLOG("Cannot create bdev for lvol %s\n", lvol->unique_id);
assert(lvs->lvol_count > 0);
lvs->lvol_count--;
SPDK_INFOLOG(vbdev_lvol, "Opening lvol %s failed\n", lvol->unique_id);
goto end;
}
lvs->lvols_opened++;
SPDK_INFOLOG(vbdev_lvol, "Opening lvol %s succeeded\n", lvol->unique_id);
end:
if (lvs->lvols_opened >= lvs->lvol_count) {
SPDK_INFOLOG(vbdev_lvol, "Opening lvols finished\n");
_vbdev_lvs_grow_finish(req, 0);
}
}
static void
_vbdev_lvs_grow_examine_cb(void *arg, struct spdk_lvol_store *lvol_store, int lvserrno)
{
struct lvol_store_bdev *lvs_bdev;
struct spdk_lvs_with_handle_req *req = (struct spdk_lvs_with_handle_req *)arg;
struct spdk_lvol *lvol, *tmp;
struct spdk_lvs_grow_req *ori_req = req->cb_arg;
if (lvserrno == -EEXIST) {
SPDK_INFOLOG(vbdev_lvol,
"Name for lvolstore on device %s conflicts with name for already loaded lvs\n",
req->base_bdev->name);
ori_req->lvserrno = lvserrno;
_vbdev_lvs_grow_finish(ori_req, lvserrno);
goto end;
} else if (lvserrno != 0) {
SPDK_INFOLOG(vbdev_lvol, "Lvol store not found on %s\n", req->base_bdev->name);
/* On error blobstore destroys bs_dev itself */
ori_req->lvserrno = lvserrno;
_vbdev_lvs_grow_finish(ori_req, lvserrno);
goto end;
}
lvserrno = spdk_bs_bdev_claim(lvol_store->bs_dev, &g_lvol_if);
if (lvserrno != 0) {
SPDK_INFOLOG(vbdev_lvol, "Lvol store base bdev already claimed by another bdev\n");
ori_req->lvserrno = lvserrno;
spdk_lvs_unload(lvol_store, _vbdev_lvs_grow_finish, ori_req);
goto end;
}
lvs_bdev = calloc(1, sizeof(*lvs_bdev));
if (!lvs_bdev) {
SPDK_ERRLOG("Cannot alloc memory for lvs_bdev\n");
ori_req->lvserrno = -ENOMEM;
spdk_lvs_unload(lvol_store, _vbdev_lvs_grow_finish, ori_req);
goto end;
}
lvs_bdev->lvs = lvol_store;
lvs_bdev->bdev = req->base_bdev;
TAILQ_INSERT_TAIL(&g_spdk_lvol_pairs, lvs_bdev, lvol_stores);
SPDK_INFOLOG(vbdev_lvol, "Lvol store found on %s - begin parsing\n",
req->base_bdev->name);
lvol_store->lvols_opened = 0;
ori_req->lvol_store = lvol_store;
ori_req->lvserrno = 0;
if (TAILQ_EMPTY(&lvol_store->lvols)) {
SPDK_INFOLOG(vbdev_lvol, "Lvol store examination done\n");
_vbdev_lvs_grow_finish(ori_req, 0);
} else {
/* Open all lvols */
TAILQ_FOREACH_SAFE(lvol, &lvol_store->lvols, link, tmp) {
spdk_lvol_open(lvol, _vbdev_lvs_grow_examine_finish, ori_req);
}
}
end:
free(req);
}
static void
_vbdev_lvs_grow_examine(struct spdk_bdev *bdev, struct spdk_lvs_grow_req *ori_req)
{
struct spdk_bs_dev *bs_dev;
struct spdk_lvs_with_handle_req *req;
int rc;
req = calloc(1, sizeof(*req));
if (req == NULL) {
SPDK_ERRLOG("Cannot alloc memory for vbdev lvol store request pointer\n");
ori_req->lvserrno = -ENOMEM;
_vbdev_lvs_grow_finish(ori_req, -ENOMEM);
return;
}
rc = spdk_bdev_create_bs_dev_ext(bdev->name, vbdev_lvs_base_bdev_event_cb,
NULL, &bs_dev);
if (rc < 0) {
SPDK_INFOLOG(vbdev_lvol, "Cannot create bs dev on %s\n", bdev->name);
ori_req->lvserrno = rc;
_vbdev_lvs_grow_finish(ori_req, rc);
free(req);
return;
}
req->base_bdev = bdev;
req->cb_arg = ori_req;
spdk_lvs_grow(bs_dev, _vbdev_lvs_grow_examine_cb, req);
}
static void
_vbdev_lvs_grow_unload_cb(void *cb_arg, int lvserrno)
{
struct spdk_lvs_grow_req *req = cb_arg;
struct lvol_store_bdev *lvs_bdev;
struct spdk_bdev *bdev;
if (lvserrno != 0) {
req->cb_fn(req->cb_arg, lvserrno);
free(req);
return;
}
lvs_bdev = req->lvs_bdev;
bdev = lvs_bdev->bdev;
TAILQ_REMOVE(&g_spdk_lvol_pairs, lvs_bdev, lvol_stores);
_vbdev_lvs_grow_examine(bdev, req);
free(lvs_bdev);
}
static void
_vbdev_lvs_grow_remove_bdev_unregistered_cb(void *cb_arg, int bdeverrno)
{
struct spdk_lvs_grow_req *req = cb_arg;
struct spdk_lvol_store *lvs = req->lvol_store;
if (bdeverrno != 0) {
SPDK_DEBUGLOG(vbdev_lvol, "Lvol unregistered with errno %d\n", bdeverrno);
}
req->lvol_cnt--;
if (req->lvol_cnt == 0) {
/* Lvol store can be unloaded once all lvols are closed. */
if (_vbdev_lvs_are_lvols_closed(lvs)) {
spdk_lvs_unload(lvs, _vbdev_lvs_grow_unload_cb, req);
} else {
req->cb_fn(req->cb_arg, -EINVAL);
free(req);
}
}
}
void
vbdev_lvs_grow(struct spdk_lvol_store *lvs,
spdk_lvs_op_complete cb_fn, void *cb_arg)
{
struct spdk_lvs_grow_req *req;
struct spdk_lvol *lvol, *tmp;
req = calloc(1, sizeof(*req));
if (!req) {
SPDK_ERRLOG("Cannot alloc memory for vbdev lvol store request pointer\n");
cb_fn(cb_arg, -ENOMEM);
return;
}
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
req->lvol_store = lvs;
req->lvs_bdev = vbdev_get_lvs_bdev_by_lvs(lvs);
if (_vbdev_lvs_are_lvols_closed(lvs)) {
spdk_lvs_unload(lvs, _vbdev_lvs_grow_unload_cb, req);
} else {
lvs->destruct = false;
TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
req->lvol_cnt++;
spdk_bdev_unregister(lvol->bdev, _vbdev_lvs_grow_remove_bdev_unregistered_cb, req);
}
assert(req->lvol_cnt > 0);
}
}
SPDK_LOG_REGISTER_COMPONENT(vbdev_lvol)


@ -106,4 +106,14 @@ struct lvol_store_bdev *vbdev_get_lvs_bdev_by_lvs(struct spdk_lvol_store *lvs);
struct spdk_lvol *vbdev_lvol_get_from_bdev(struct spdk_bdev *bdev);
/**
* \brief Grow the given lvolstore.
*
* \param lvs Pointer to lvolstore
* \param cb_fn Completion callback
* \param cb_arg Completion callback custom arguments
*/
void vbdev_lvs_grow(struct spdk_lvol_store *lvs,
spdk_lvs_op_complete cb_fn, void *cb_arg);
#endif /* SPDK_VBDEV_LVOL_H */


@ -1028,3 +1028,67 @@ cleanup:
}
SPDK_RPC_REGISTER("bdev_lvol_get_lvstores", rpc_bdev_lvol_get_lvstores, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_lvol_get_lvstores, get_lvol_stores)
struct rpc_bdev_lvol_grow_lvstore {
char *uuid;
char *lvs_name;
};
static void
free_rpc_bdev_lvol_grow_lvstore(struct rpc_bdev_lvol_grow_lvstore *req)
{
free(req->uuid);
free(req->lvs_name);
}
static const struct spdk_json_object_decoder rpc_bdev_lvol_grow_lvstore_decoders[] = {
{"uuid", offsetof(struct rpc_bdev_lvol_grow_lvstore, uuid), spdk_json_decode_string, true},
{"lvs_name", offsetof(struct rpc_bdev_lvol_grow_lvstore, lvs_name), spdk_json_decode_string, true},
};
static void
rpc_bdev_lvol_grow_lvstore_cb(void *cb_arg, int lvserrno)
{
struct spdk_jsonrpc_request *request = cb_arg;
if (lvserrno != 0) {
goto invalid;
}
spdk_jsonrpc_send_bool_response(request, true);
return;
invalid:
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
spdk_strerror(-lvserrno));
}
static void
rpc_bdev_lvol_grow_lvstore(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_bdev_lvol_grow_lvstore req = {};
struct spdk_lvol_store *lvs = NULL;
int rc;
if (spdk_json_decode_object(params, rpc_bdev_lvol_grow_lvstore_decoders,
SPDK_COUNTOF(rpc_bdev_lvol_grow_lvstore_decoders),
&req)) {
SPDK_INFOLOG(lvol_rpc, "spdk_json_decode_object failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"spdk_json_decode_object failed");
goto cleanup;
}
rc = vbdev_get_lvol_store_by_uuid_xor_name(req.uuid, req.lvs_name, &lvs);
if (rc != 0) {
spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
goto cleanup;
}
vbdev_lvs_grow(lvs, rpc_bdev_lvol_grow_lvstore_cb, request);
cleanup:
free_rpc_bdev_lvol_grow_lvstore(&req);
}
SPDK_RPC_REGISTER("bdev_lvol_grow_lvstore", rpc_bdev_lvol_grow_lvstore, SPDK_RPC_RUNTIME)


@ -113,6 +113,12 @@ spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}
void
spdk_lvs_grow(struct spdk_bs_dev *bs_dev, spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
{
cb_fn(cb_arg, NULL, -EINVAL);
}
void
spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name,
spdk_lvs_op_complete cb_fn, void *cb_arg)


@ -2543,6 +2543,132 @@ bs_load_custom_cluster_size(void)
g_bs = NULL;
}
static void
bs_load_after_failed_grow(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_bs_super_block *super_block;
struct spdk_bs_opts opts;
struct spdk_bs_md_mask *mask;
struct spdk_blob_opts blob_opts;
struct spdk_blob *blob, *snapshot;
spdk_blob_id blobid, snapshotid;
uint64_t total_data_clusters;
dev = init_dev();
spdk_bs_opts_init(&opts, sizeof(opts));
snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
/*
* The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
* blobstore will create 64 md pages by default. We set num_md_pages to 128,
* so the blobstore can grow to double the size.
*/
opts.num_md_pages = 128;
/* Initialize a new blob store */
spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/* Create blob */
ut_spdk_blob_opts_init(&blob_opts);
blob_opts.num_clusters = 10;
blob = ut_blob_create_and_open(bs, &blob_opts);
blobid = spdk_blob_get_id(blob);
/* Create snapshot */
spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
snapshotid = g_blobid;
spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
snapshot = g_blob;
spdk_blob_close(snapshot, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
spdk_blob_close(blob, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
total_data_clusters = bs->total_data_clusters;
CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
/* Unload the blob store */
spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
super_block = (struct spdk_bs_super_block *)g_dev_buffer;
CU_ASSERT(super_block->clean == 1);
mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
/*
* We change mask->length to emulate this scenario: a spdk_bs_grow call failed after it extended
* the used_cluster bitmap length but before it updated the super block.
*/
mask->length *= 2;
/* Load an existing blob store */
dev = init_dev();
dev->blockcnt *= 2;
spdk_bs_opts_init(&opts, sizeof(opts));
opts.clear_method = BS_CLEAR_WITH_NONE;
snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/* Check the capacity is the same as before */
CU_ASSERT(bs->total_data_clusters == total_data_clusters);
CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
/* Check the blob and the snapshot are still available */
spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
spdk_blob_close(blob, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
snapshot = g_blob;
spdk_blob_close(snapshot, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(super_block->clean == 1);
g_bs = NULL;
}
static void
bs_type(void)
{
@ -2743,6 +2869,84 @@ bs_test_recover_cluster_count(void)
g_bs = NULL;
}
static void
bs_test_grow(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_bs_super_block super_block;
struct spdk_bs_opts opts;
struct spdk_bs_md_mask mask;
uint64_t bdev_size;
dev = init_dev();
bdev_size = dev->blockcnt * dev->blocklen;
spdk_bs_opts_init(&opts, sizeof(opts));
spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
/*
* To make sure all the metadata has been written to the disk,
* we check g_dev_buffer after spdk_bs_unload.
*/
memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
CU_ASSERT(super_block.size == bdev_size);
/*
* Make sure the used_cluster mask is correct.
*/
memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
sizeof(struct spdk_bs_md_mask));
CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
/*
* The default dev size is 64M; here we set the dev size to 128M so that the
* blobstore adjusts its metadata according to the new size. The new dev size
* is larger than the g_dev_buffer size, so we set clear_method to NONE;
* otherwise the blobstore would try to clear the dev and write beyond the
* end of g_dev_buffer.
*/
dev = init_dev();
dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
bdev_size = dev->blockcnt * dev->blocklen;
spdk_bs_opts_init(&opts, sizeof(opts));
opts.clear_method = BS_CLEAR_WITH_NONE;
spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
/*
* After spdk_bs_grow, all metadata has been written to the disk,
* so we can check g_dev_buffer now.
*/
memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
CU_ASSERT(super_block.size == bdev_size);
/*
* Make sure the used_cluster mask has been updated according to the bdev size
*/
memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
sizeof(struct spdk_bs_md_mask));
CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
spdk_bs_unload(bs, bs_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
/*
* Create a blobstore and then unload it.
*/
@ -7238,6 +7442,7 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, bs_load);
CU_ADD_TEST(suite_bs, bs_load_pending_removal);
CU_ADD_TEST(suite, bs_load_custom_cluster_size);
CU_ADD_TEST(suite, bs_load_after_failed_grow);
CU_ADD_TEST(suite_bs, bs_unload);
CU_ADD_TEST(suite, bs_cluster_sz);
CU_ADD_TEST(suite_bs, bs_usable_clusters);
@ -7246,6 +7451,7 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, bs_type);
CU_ADD_TEST(suite, bs_super_block);
CU_ADD_TEST(suite, bs_test_recover_cluster_count);
CU_ADD_TEST(suite, bs_test_grow);
CU_ADD_TEST(suite, blob_serialize_test);
CU_ADD_TEST(suite_bs, blob_crc);
CU_ADD_TEST(suite, super_block_crc);


@ -156,6 +156,13 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
cb_fn(cb_arg, bs, ut_dev->load_status);
}
void
spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
cb_fn(cb_arg, NULL, -EINVAL);
}
struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
{
if (g_io_channel == NULL) {