lvol/blob: add shallow copy over a given device

A shallow copy copies to the destination device only the
clusters allocated to the blob/lvol, discarding those belonging
to the blob/lvol's parent snapshot. The blob/lvol must be read only.

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
This commit is contained in:
Damiano Cipriani 2023-02-15 17:57:44 +01:00
parent 980f535d38
commit c34d4d490d
No known key found for this signature in database
GPG Key ID: 95DD00E4710A7657
20 changed files with 1197 additions and 1 deletions

View File

@ -495,6 +495,7 @@ Example response:
"bdev_lvol_delete_lvstore",
"bdev_lvol_rename_lvstore",
"bdev_lvol_create_lvstore",
"bdev_lvol_shallow_copy",
"bdev_daos_delete",
"bdev_daos_create",
"bdev_daos_resize"
@ -9998,6 +9999,90 @@ Example response:
]
~~~
### bdev_lvol_shallow_copy {#rpc_bdev_lvol_shallow_copy}
Make a shallow copy of lvol over a given bdev. Only clusters allocated to the lvol will be written to the bdev.
Must have:
* lvol read only
* lvol size not larger than bdev size
* lvstore block size a multiple of bdev block size
#### Parameters
Name | Optional | Type | Description
----------------------- | -------- | ----------- | -----------
src_lvol_name | Required | string | UUID or alias of lvol to create a copy from
dst_bdev_name | Required | string | Name of the bdev that acts as destination for the copy
#### Example
Example request:
~~~json
{
"jsonrpc": "2.0",
"method": "bdev_lvol_shallow_copy",
"id": 1,
"params": {
"src_lvol_name": "8a47421a-20cf-444f-845c-d97ad0b0bd8e",
"dst_bdev_name": "Nvme1n1"
}
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": true
}
~~~
### bdev_lvol_shallow_copy_status {#rpc_bdev_lvol_shallow_copy_status}
Get shallow copy status
#### Result
This RPC reports whether a shallow copy is still in progress and the operation's progress in the format
_number_of_copied_clusters/total_clusters_to_copy_
#### Parameters
Name | Optional | Type | Description
----------------------- | -------- | ----------- | -----------
src_lvol_name | Required | string | UUID or alias of source lvol
#### Example
Example request:
~~~json
{
"jsonrpc": "2.0",
"method": "bdev_lvol_shallow_copy_status",
"id": 1,
"params": {
"src_lvol_name": "8a47421a-20cf-444f-845c-d97ad0b0bd8e"
}
}
~~~
Example response:
~~~json
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"in_progress": true,
"status": "2/4"
}
}
~~~
## RAID
### bdev_raid_get_bdevs {#rpc_bdev_raid_get_bdevs}

View File

@ -515,6 +515,30 @@ uint64_t spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t o
*/
uint64_t spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset);
/**
* Get the number of copied clusters of a shallow copy operation
 * If a shallow copy of the blob is in progress, this function returns the number of already
* copied clusters.
*
* \param blob Blob struct to query.
*
* \return cluster index or UINT64_MAX if no shallow copy is in progress
*/
uint64_t spdk_blob_get_shallow_copy_copied_clusters(struct spdk_blob *blob);
/**
* Get the total number of clusters to be copied in a shallow copy operation
 * If a shallow copy of the blob is in progress, this function returns the total number
 * of clusters involved in the operation.
*
* \param blob Blob struct to query.
*
* \return total number, 0 if no shallow copy is in progress
*/
uint64_t spdk_blob_get_shallow_copy_total_clusters(struct spdk_blob *blob);
struct spdk_blob_xattr_opts {
/* Number of attributes */
size_t count;
@ -761,6 +785,26 @@ void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *ch
void spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg);
/**
* Perform a shallow copy over a device
*
 * This call makes a shallow copy of a blob over an external blobstore block device.
 * Only clusters allocated to the blob will be written on the device.
* Blob size must be smaller than device size.
* Blobstore block size must be a multiple of device block size.
* \param bs Blobstore
* \param channel IO channel used to copy the blob.
* \param blobid The id of the blob.
* \param ext_dev The device to copy on
* \param cb_fn Called when the operation is complete.
* \param cb_arg Argument passed to function cb_fn.
*/
void spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
spdk_blob_op_complete cb_fn, void *cb_arg);
struct spdk_blob_open_opts {
enum blob_clear_method clear_method;

View File

@ -381,6 +381,20 @@ void spdk_lvol_decouple_parent(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_
*/
bool spdk_lvol_is_degraded(const struct spdk_lvol *lvol);
/**
* Make a shallow copy of lvol on given bs_dev.
*
* lvol must be read only and lvol size must be smaller than bs_dev size.
*
* \param lvol Handle to lvol
* \param ext_dev The bs_dev to copy on. This is created on the given bdev by using
* spdk_bdev_create_bs_dev_ext() beforehand
* \param cb_fn Completion callback
* \param cb_arg Completion callback custom arguments
*/
void spdk_lvol_shallow_copy(struct spdk_lvol *lvol, struct spdk_bs_dev *ext_dev,
spdk_lvol_op_complete cb_fn, void *cb_arg);
#ifdef __cplusplus
}
#endif

View File

@ -46,6 +46,13 @@ struct spdk_lvol_req {
char name[SPDK_LVOL_NAME_MAX];
};
/* Request context for a shallow-copy operation started through the lvol layer. */
struct spdk_lvol_copy_req {
	/* User completion callback invoked when the copy finishes or fails. */
	spdk_lvol_op_complete cb_fn;
	/* Opaque argument passed back to cb_fn. */
	void *cb_arg;
	/* Source lvol of the copy. */
	struct spdk_lvol *lvol;
	/* Destination blobstore block device; owned by the request and
	 * destroyed when the operation completes. */
	struct spdk_bs_dev *ext_dev;
};
struct spdk_lvs_with_handle_req {
spdk_lvs_op_with_handle_complete cb_fn;
void *cb_arg;

View File

@ -40,6 +40,8 @@ static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool inte
static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
static void bs_shallow_copy_cluster_find_next(void *cb_arg, int bserrno);
/*
* External snapshots require a channel per thread per esnap bdev. The tree
* is populated lazily as blob IOs are handled by the back_bs_dev. When this
@ -302,6 +304,8 @@ blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
blob->parent_id = SPDK_BLOBID_INVALID;
blob->state = SPDK_BLOB_STATE_DIRTY;
blob->u.shallow_copy.copied_clusters_number = 0;
blob->u.shallow_copy.num_clusters_to_copy = 0;
blob->extent_rle_found = false;
blob->extent_table_found = false;
blob->active.num_pages = 1;
@ -5879,6 +5883,26 @@ spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
return blob_find_io_unit(blob, offset, false);
}
uint64_t
spdk_blob_get_shallow_copy_copied_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);

	/* num_clusters_to_copy == 0 means no shallow copy is in progress;
	 * report that with the UINT64_MAX sentinel. */
	if (blob->u.shallow_copy.num_clusters_to_copy == 0) {
		return UINT64_MAX;
	}

	return blob->u.shallow_copy.copied_clusters_number;
}
/* Total clusters a running shallow copy will transfer; 0 when no copy is
 * in progress (the counters are reset when the operation ends). */
uint64_t
spdk_blob_get_shallow_copy_total_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return blob->u.shallow_copy.num_clusters_to_copy;
}
/* START spdk_bs_create_blob */
static void
@ -6919,6 +6943,232 @@ spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel
}
/* END spdk_bs_inflate_blob */
/* START spdk_bs_blob_shallow_copy */
/* Per-operation state for spdk_bs_blob_shallow_copy(). Allocated at start,
 * freed by bs_shallow_copy_cleanup_finish(). */
struct shallow_copy_ctx {
	/* Completion descriptor holding the caller's cb_fn/cb_arg. */
	struct spdk_bs_cpl cpl;
	/* First error seen during the copy; 0 on success. */
	int bserrno;
	/* Blob source for copy */
	struct spdk_blob *blob;
	struct spdk_io_channel *blob_channel;
	/* Destination device for copy */
	struct spdk_bs_dev *ext_dev;
	struct spdk_io_channel *ext_channel;
	/* Current cluster for copy operation */
	uint64_t cluster;
	/* Buffer for blob reading; one cluster in size, DMA-able. */
	uint8_t *read_buff;
	/* Struct for external device writing */
	struct spdk_bs_dev_cb_args ext_args;
};
/* Final stage of a shallow copy: release the per-operation resources and
 * deliver the stored completion to the caller. Runs on both success and
 * failure paths (as the spdk_blob_close() completion). */
static void
bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;

	/* Keep the first error recorded during the copy; a later cleanup
	 * failure is logged but must not mask it. */
	if (bserrno != 0 && ctx->bserrno == 0) {
		SPDK_ERRLOG("Shallow copy cleanup error %d\n", bserrno);
		ctx->bserrno = bserrno;
	}

	ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel);
	spdk_free(ctx->read_buff);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, ctx->bserrno);

	free(ctx);
}
/* Completion of one cluster write to the external device. On success,
 * advance the progress counters and continue with the next allocated
 * cluster; on failure, abort the operation and clean up. */
static void
bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("Shallow copy ext dev write error %d\n", bserrno);
		ctx->bserrno = bserrno;
		/* Abort: unlock the blob, reset progress counters and tear down. */
		blob->locked_operation_in_progress = false;
		blob->u.shallow_copy.copied_clusters_number = 0;
		blob->u.shallow_copy.num_clusters_to_copy = 0;
		spdk_blob_close(blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	/* One more cluster landed on the destination device. */
	ctx->cluster++;
	blob->u.shallow_copy.copied_clusters_number++;

	bs_shallow_copy_cluster_find_next(ctx, 0);
}
/* Completion of reading one cluster from the source blob into
 * ctx->read_buff. On success, write that cluster to the same LBA offset on
 * the external device; on failure, abort and clean up. */
static void
bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = (struct shallow_copy_ctx *)cb_arg;
	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
	struct spdk_blob *_blob = ctx->blob;
	if (bserrno != 0) {
		SPDK_ERRLOG("Shallow copy blob read error %d\n", bserrno);
		ctx->bserrno = bserrno;
		/* Abort path: unlock the blob, reset progress, tear down via close. */
		_blob->locked_operation_in_progress = false;
		_blob->u.shallow_copy.copied_clusters_number = 0;
		_blob->u.shallow_copy.num_clusters_to_copy = 0;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}
	/* Set up the bs_dev callback args before issuing the write; the write
	 * completes into bs_shallow_copy_bdev_write_cpl(). */
	ctx->ext_args.channel = ctx->ext_channel;
	ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
	ctx->ext_args.cb_arg = ctx;
	/* Write the cluster at the same logical offset it occupies in the blob. */
	ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
		       bs_cluster_to_lba(_blob->bs, ctx->cluster),
		       bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
		       &ctx->ext_args);
}
/* Advance to the next cluster owned by the blob itself (non-zero entry in
 * active.clusters, i.e. not inherited from a parent snapshot) and start
 * reading it. When no allocated cluster remains, finish the operation. */
static void
bs_shallow_copy_cluster_find_next(void *cb_arg, int bserrno)
{
	struct shallow_copy_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		SPDK_ERRLOG("Shallow copy bdev write error %d\n", bserrno);
		ctx->bserrno = bserrno;
		blob->locked_operation_in_progress = false;
		blob->u.shallow_copy.copied_clusters_number = 0;
		blob->u.shallow_copy.num_clusters_to_copy = 0;
		spdk_blob_close(blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	/* Skip clusters not allocated to this blob. */
	for (; ctx->cluster < blob->active.num_clusters; ctx->cluster++) {
		if (blob->active.clusters[ctx->cluster] != 0) {
			break;
		}
	}

	if (ctx->cluster >= blob->active.num_clusters) {
		/* All allocated clusters copied: reset progress state,
		 * unlock the blob and finish through the cleanup path. */
		blob->u.shallow_copy.copied_clusters_number = 0;
		blob->u.shallow_copy.num_clusters_to_copy = 0;
		blob->locked_operation_in_progress = false;
		spdk_blob_close(blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}

	/* Read one full cluster from the blob; completion continues the chain. */
	blob_request_submit_op_single(ctx->blob_channel, blob, ctx->read_buff,
				      bs_cluster_to_lba(blob->bs, ctx->cluster),
				      bs_dev_byte_to_lba(blob->bs->dev, blob->bs->cluster_sz),
				      bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
}
/* Completion of opening the source blob: validate the destination device,
 * take the blob's operation lock, count the clusters to copy, and kick off
 * the copy loop. Any validation failure closes the blob and completes the
 * operation with an error. */
static void
bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct shallow_copy_ctx *ctx = (struct shallow_copy_ctx *)cb_arg;
	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
	uint32_t blob_block_size;
	uint64_t blob_total_size;
	uint64_t i;
	if (bserrno != 0) {
		SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
		ctx->bserrno = bserrno;
		/* Blob never opened: go straight to cleanup, no close needed. */
		bs_shallow_copy_cleanup_finish(ctx, bserrno);
		return;
	}
	blob_block_size = _blob->bs->dev->blocklen;
	blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);
	/* Destination must be large enough to hold the full logical blob size. */
	if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
		SPDK_ERRLOG("external device must have at least blob size\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}
	/* Blobstore block size must be a multiple of the destination block size
	 * so cluster-sized writes map to whole destination blocks. */
	if (blob_block_size % ext_dev->blocklen != 0) {
		SPDK_ERRLOG("external device block size is not compatible with blobstore block size\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}
	ctx->blob = _blob;
	/* Only one locked operation (resize, inflate, copy, ...) at a time. */
	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot make a shallow copy of blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
		return;
	}
	_blob->locked_operation_in_progress = true;
	/* Pre-count allocated clusters so status queries can report progress
	 * as copied/total. */
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (_blob->active.clusters[i] != 0) {
			_blob->u.shallow_copy.num_clusters_to_copy++;
		}
	}
	ctx->cluster = 0;
	bs_shallow_copy_cluster_find_next(ctx, 0);
}
/* Start a shallow copy of blob `blobid` onto the external device `ext_dev`.
 * Allocates the operation context and one cluster-sized DMA read buffer,
 * creates a channel on the destination device, then opens the blob; the
 * rest of the operation proceeds through completion callbacks. cb_fn is
 * invoked exactly once with the final status. */
void
spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			  spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
			  spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct shallow_copy_ctx *ctx;
	struct spdk_io_channel *ext_channel;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	/* Fix: store the callback in u.blob_basic to match the declared
	 * SPDK_BS_CPL_TYPE_BLOB_BASIC type. The original stored it through
	 * u.bs_basic while bs_shallow_copy_cleanup_finish() reads
	 * u.blob_basic - that only worked because the union members happen
	 * to share layout. */
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->blob_channel = channel;

	/* One cluster-sized, device-aligned DMA buffer reused for every cluster. */
	ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL,
				     SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->read_buff) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ext_channel = ext_dev->create_channel(ext_dev);
	if (!ext_channel) {
		spdk_free(ctx->read_buff);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->ext_dev = ext_dev;
	ctx->ext_channel = ext_channel;

	spdk_bs_open_blob(bs, blobid, bs_shallow_copy_blob_open_cpl, ctx);
}
/* END spdk_bs_blob_shallow_copy */
/* START spdk_blob_resize */
struct spdk_bs_resize_ctx {
spdk_blob_op_complete cb_fn;

View File

@ -148,6 +148,13 @@ struct spdk_blob {
/* Number of data clusters retrieved from extent table,
* that many have to be read from extent pages. */
uint64_t remaining_clusters_in_et;
union {
struct {
uint64_t num_clusters_to_copy;
uint64_t copied_clusters_number;
} shallow_copy;
} u;
};
struct spdk_blob_store {

View File

@ -22,6 +22,8 @@
spdk_blob_get_num_clusters;
spdk_blob_get_next_allocated_io_unit;
spdk_blob_get_next_unallocated_io_unit;
spdk_blob_get_shallow_copy_copied_clusters;
spdk_blob_get_shallow_copy_total_clusters;
spdk_blob_opts_init;
spdk_bs_create_blob_ext;
spdk_bs_create_blob;
@ -38,6 +40,7 @@
spdk_bs_delete_blob;
spdk_bs_inflate_blob;
spdk_bs_blob_decouple_parent;
spdk_bs_blob_shallow_copy;
spdk_blob_open_opts_init;
spdk_bs_open_blob;
spdk_bs_open_blob_ext;

View File

@ -2208,3 +2208,78 @@ spdk_lvol_is_degraded(const struct spdk_lvol *lvol)
}
return spdk_blob_is_degraded(blob);
}
/* Blobstore-level completion for spdk_lvol_shallow_copy(): release the io
 * channel, forward the status to the user callback and free the request. */
static void
lvol_shallow_copy_cb(void *cb_arg, int lvolerrno)
{
	struct spdk_lvol_req *req = cb_arg;

	spdk_bs_free_io_channel(req->channel);

	if (lvolerrno < 0) {
		SPDK_ERRLOG("Could not make a shallow copy of lvol\n");
	}

	req->cb_fn(req->cb_arg, lvolerrno);
	free(req);
}
/* Validate the request and start a blobstore shallow copy of `lvol` onto
 * `ext_dev`. The lvol must be read only and must fit on the destination
 * device. cb_fn is invoked exactly once with the final status. */
void
spdk_lvol_shallow_copy(struct spdk_lvol *lvol, struct spdk_bs_dev *ext_dev,
		       spdk_lvol_op_complete cb_fn, void *cb_arg)
{
	struct spdk_lvol_req *req;
	spdk_blob_id blob_id;
	uint64_t lvol_total_size;

	assert(cb_fn != NULL);

	if (lvol == NULL) {
		SPDK_ERRLOG("lvol does not exist\n");
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	if (ext_dev == NULL) {
		SPDK_ERRLOG("External device does not exist\n");
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	/* Fix: guard against a NULL blob (e.g. a degraded lvol whose external
	 * snapshot is missing) before dereferencing it below. */
	if (lvol->blob == NULL) {
		SPDK_ERRLOG("lvol has no blob to copy from\n");
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	if (!spdk_blob_is_read_only(lvol->blob)) {
		SPDK_ERRLOG("lvol must be read only\n");
		cb_fn(cb_arg, -EPERM);
		return;
	}

	/* The destination must be able to hold the full logical lvol size. */
	lvol_total_size = spdk_blob_get_num_clusters(lvol->blob) *
			  spdk_bs_get_cluster_size(lvol->lvol_store->blobstore);

	if (lvol_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
		SPDK_ERRLOG("bdev must have at least lvol size\n");
		cb_fn(cb_arg, -EFBIG);
		return;
	}

	req = calloc(1, sizeof(*req));
	if (!req) {
		SPDK_ERRLOG("Cannot alloc memory for lvol request pointer\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->channel = spdk_bs_alloc_io_channel(lvol->lvol_store->blobstore);
	if (req->channel == NULL) {
		SPDK_ERRLOG("Cannot alloc io channel for lvol shallow copy request\n");
		free(req);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob_id = spdk_blob_get_id(lvol->blob);

	spdk_bs_blob_shallow_copy(lvol->lvol_store->blobstore, req->channel, blob_id, ext_dev,
				  lvol_shallow_copy_cb, req);
}

View File

@ -26,6 +26,7 @@
spdk_lvol_get_by_uuid;
spdk_lvol_get_by_names;
spdk_lvol_is_degraded;
spdk_lvol_shallow_copy;
# internal functions
spdk_lvol_resize;

View File

@ -2020,4 +2020,71 @@ fail:
/* End external snapshot support */
/* bdev event callback required by spdk_bdev_create_bs_dev_ext(). The
 * destination bdev is only held for the duration of the copy, so events
 * are deliberately ignored here. */
static void
_vbdev_lvol_shallow_copy_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
					    void *event_ctx)
{
}
/* Lvol-layer completion: destroy the destination bs_dev created for this
 * copy, pass the status up to the RPC/user callback and free the request. */
static void
_vbdev_lvol_shallow_copy_cb(void *cb_arg, int lvolerrno)
{
	struct spdk_lvol_copy_req *req = cb_arg;
	struct spdk_lvol *lvol = req->lvol;

	if (lvolerrno != 0) {
		SPDK_ERRLOG("Could not make a shallow copy of bdev lvol %s due to error: %d.\n", lvol->name,
			    lvolerrno);
	}

	/* The bs_dev was created solely for this operation; release it. */
	req->ext_dev->destroy(req->ext_dev);

	req->cb_fn(req->cb_arg, lvolerrno);
	free(req);
}
/* Make a shallow copy of `lvol` onto the bdev named `bdev_name`: create a
 * bs_dev wrapper for the destination bdev and delegate to
 * spdk_lvol_shallow_copy(). cb_fn is invoked exactly once with the final
 * status. */
void
vbdev_lvol_shallow_copy(struct spdk_lvol *lvol, const char *bdev_name,
			spdk_lvol_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_dev *ext_dev;
	struct spdk_lvol_copy_req *req;
	int rc;

	if (lvol == NULL) {
		SPDK_ERRLOG("lvol does not exist\n");
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	if (bdev_name == NULL) {
		SPDK_ERRLOG("bdev name does not exist\n");
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	assert(lvol->bdev != NULL);

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		SPDK_ERRLOG("Cannot alloc memory for vbdev lvol copy request pointer\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	rc = spdk_bdev_create_bs_dev_ext(bdev_name, _vbdev_lvol_shallow_copy_base_bdev_event_cb,
					 NULL, &ext_dev);
	if (rc < 0) {
		SPDK_ERRLOG("Cannot create external bdev blob device\n");
		free(req);
		/* Fix: the original returned here without invoking cb_fn, so the
		 * caller (e.g. the RPC handler) would never complete its request.
		 * Propagate the error to the completion callback. */
		cb_fn(cb_arg, rc);
		return;
	}

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->lvol = lvol;
	req->ext_dev = ext_dev;

	spdk_lvol_shallow_copy(lvol, ext_dev, _vbdev_lvol_shallow_copy_cb, req);
}
SPDK_LOG_REGISTER_COMPONENT(vbdev_lvol)

View File

@ -125,4 +125,15 @@ int vbdev_lvol_esnap_dev_create(void *bs_ctx, void *blob_ctx, struct spdk_blob *
const void *esnap_id, uint32_t id_len,
struct spdk_bs_dev **_bs_dev);
/**
* \brief Make a shallow copy of lvol over a bdev
*
* \param lvol Handle to lvol
* \param bdev_name Name of the bdev to copy on
* \param cb_fn Completion callback
* \param cb_arg Completion callback custom arguments
*/
void vbdev_lvol_shallow_copy(struct spdk_lvol *lvol, const char *bdev_name,
spdk_lvol_op_complete cb_fn, void *cb_arg);
#endif /* SPDK_VBDEV_LVOL_H */

View File

@ -1341,3 +1341,156 @@ cleanup:
free_rpc_bdev_lvol_grow_lvstore(&req);
}
SPDK_RPC_REGISTER("bdev_lvol_grow_lvstore", rpc_bdev_lvol_grow_lvstore, SPDK_RPC_RUNTIME)
/* Decoded parameters of the bdev_lvol_shallow_copy RPC. */
struct rpc_bdev_lvol_shallow_copy {
	/* UUID or alias of the source lvol bdev. */
	char *src_lvol_name;
	/* Name of the destination bdev. */
	char *dst_bdev_name;
};

/* Free the strings allocated by the JSON decoder. */
static void
free_rpc_bdev_lvol_shallow_copy(struct rpc_bdev_lvol_shallow_copy *req)
{
	free(req->src_lvol_name);
	free(req->dst_bdev_name);
}

/* JSON decoder table: both parameters are required strings. */
static const struct spdk_json_object_decoder rpc_bdev_lvol_shallow_copy_decoders[] = {
	{"src_lvol_name", offsetof(struct rpc_bdev_lvol_shallow_copy, src_lvol_name), spdk_json_decode_string},
	{"dst_bdev_name", offsetof(struct rpc_bdev_lvol_shallow_copy, dst_bdev_name), spdk_json_decode_string},
};
/* Completion of the shallow copy started by the RPC handler: answer the
 * pending JSON-RPC request with `true` on success or an error otherwise. */
static void
rpc_bdev_lvol_shallow_copy_cb(void *cb_arg, int lvolerrno)
{
	struct spdk_jsonrpc_request *request = cb_arg;

	if (lvolerrno == 0) {
		spdk_jsonrpc_send_bool_response(request, true);
	} else {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 spdk_strerror(-lvolerrno));
	}
}
/* Handler for the bdev_lvol_shallow_copy RPC: resolve the source lvol and
 * destination bdev by name, then start the asynchronous copy. The JSON-RPC
 * response is sent from rpc_bdev_lvol_shallow_copy_cb(). */
static void
rpc_bdev_lvol_shallow_copy(struct spdk_jsonrpc_request *request,
			   const struct spdk_json_val *params)
{
	struct rpc_bdev_lvol_shallow_copy req = {};
	struct spdk_lvol *src_lvol;
	struct spdk_bdev *src_lvol_bdev;
	struct spdk_bdev *dst_bdev;
	SPDK_INFOLOG(lvol_rpc, "Shallow copying lvol\n");
	if (spdk_json_decode_object(params, rpc_bdev_lvol_shallow_copy_decoders,
				    SPDK_COUNTOF(rpc_bdev_lvol_shallow_copy_decoders),
				    &req)) {
		SPDK_INFOLOG(lvol_rpc, "spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}
	/* The source lvol is addressed through its bdev name/UUID/alias. */
	src_lvol_bdev = spdk_bdev_get_by_name(req.src_lvol_name);
	if (src_lvol_bdev == NULL) {
		SPDK_ERRLOG("lvol bdev '%s' does not exist\n", req.src_lvol_name);
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}
	src_lvol = vbdev_lvol_get_from_bdev(src_lvol_bdev);
	if (src_lvol == NULL) {
		SPDK_ERRLOG("lvol does not exist\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}
	/* Only existence is checked here; size/block-size constraints are
	 * validated by the lvol/blobstore layers. */
	dst_bdev = spdk_bdev_get_by_name(req.dst_bdev_name);
	if (dst_bdev == NULL) {
		SPDK_ERRLOG("bdev '%s' does not exist\n", req.dst_bdev_name);
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}
	/* Asynchronous: the request is completed in the callback. */
	vbdev_lvol_shallow_copy(src_lvol, req.dst_bdev_name, rpc_bdev_lvol_shallow_copy_cb, request);
cleanup:
	free_rpc_bdev_lvol_shallow_copy(&req);
}
SPDK_RPC_REGISTER("bdev_lvol_shallow_copy", rpc_bdev_lvol_shallow_copy, SPDK_RPC_RUNTIME)
/* Decoded parameters of the bdev_lvol_shallow_copy_status RPC. */
struct rpc_bdev_lvol_shallow_copy_status {
	/* UUID or alias of the source lvol bdev being queried. */
	char *src_lvol_name;
};

/* Free the string allocated by the JSON decoder. */
static void
free_rpc_bdev_lvol_shallow_copy_status(struct rpc_bdev_lvol_shallow_copy_status *req)
{
	free(req->src_lvol_name);
}

/* JSON decoder table: the lvol name is a required string. */
static const struct spdk_json_object_decoder rpc_bdev_lvol_shallow_copy_status_decoders[] = {
	{"src_lvol_name", offsetof(struct rpc_bdev_lvol_shallow_copy_status, src_lvol_name), spdk_json_decode_string},
};
/* Handler for the bdev_lvol_shallow_copy_status RPC: report whether a
 * shallow copy of the named lvol is in progress and, if so, its progress
 * as "copied/total" clusters. This is a synchronous query. */
static void
rpc_bdev_lvol_shallow_copy_status(struct spdk_jsonrpc_request *request,
				  const struct spdk_json_val *params)
{
	struct rpc_bdev_lvol_shallow_copy_status req = {};
	struct spdk_bdev *src_lvol_bdev;
	struct spdk_lvol *src_lvol;
	struct spdk_json_write_ctx *w;
	uint64_t cluster_index, total_clusters;
	SPDK_INFOLOG(lvol_rpc, "Shallow copy status\n");
	if (spdk_json_decode_object(params, rpc_bdev_lvol_shallow_copy_status_decoders,
				    SPDK_COUNTOF(rpc_bdev_lvol_shallow_copy_status_decoders),
				    &req)) {
		SPDK_INFOLOG(lvol_rpc, "spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "spdk_json_decode_object failed");
		goto cleanup;
	}
	src_lvol_bdev = spdk_bdev_get_by_name(req.src_lvol_name);
	if (src_lvol_bdev == NULL) {
		SPDK_ERRLOG("lvol bdev '%s' does not exist\n", req.src_lvol_name);
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}
	src_lvol = vbdev_lvol_get_from_bdev(src_lvol_bdev);
	if (src_lvol == NULL) {
		SPDK_ERRLOG("lvol does not exist\n");
		spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
		goto cleanup;
	}
	cluster_index = spdk_blob_get_shallow_copy_copied_clusters(src_lvol->blob);
	total_clusters = spdk_blob_get_shallow_copy_total_clusters(src_lvol->blob);
	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_object_begin(w);
	/* total_clusters > 0 iff a shallow copy is currently running. */
	spdk_json_write_named_bool(w, "in_progress", total_clusters > 0);
	if (total_clusters > 0) {
		/* Fix: use PRIu64 instead of "%lu" for uint64_t values - "%lu"
		 * is undefined behavior on ILP32 targets where long is 32 bits.
		 * PRIu64 comes from <inttypes.h> (pulled in via spdk/stdinc.h). */
		spdk_json_write_named_string_fmt(w, "status", "%" PRIu64 "/%" PRIu64,
						 cluster_index, total_clusters);
	}
	spdk_json_write_object_end(w);
	spdk_jsonrpc_end_result(request, w);
cleanup:
	free_rpc_bdev_lvol_shallow_copy_status(&req);
}
SPDK_RPC_REGISTER("bdev_lvol_shallow_copy_status", rpc_bdev_lvol_shallow_copy_status,
SPDK_RPC_RUNTIME)

View File

@ -223,6 +223,32 @@ def bdev_lvol_decouple_parent(client, name):
return client.call('bdev_lvol_decouple_parent', params)
def bdev_lvol_shallow_copy(client, src_lvol_name, dst_bdev_name):
    """Make a shallow copy of lvol over a given bdev

    Only clusters allocated to the lvol are written; the lvol must be read only.

    Args:
        src_lvol_name: name of lvol to create a copy from
        dst_bdev_name: name of the bdev that acts as destination for the copy

    Returns:
        True on success (the RPC result).
    """
    params = {
        'src_lvol_name': src_lvol_name,
        'dst_bdev_name': dst_bdev_name
    }
    return client.call('bdev_lvol_shallow_copy', params)
def bdev_lvol_shallow_copy_status(client, src_lvol_name):
    """Get shallow copy status

    Args:
        src_lvol_name: name of source lvol

    Returns:
        Dict with 'in_progress' and, while running, a 'status' string of the
        form "copied_clusters/total_clusters" (the RPC result).
    """
    params = {
        'src_lvol_name': src_lvol_name
    }
    return client.call('bdev_lvol_shallow_copy_status', params)
def bdev_lvol_delete_lvstore(client, uuid=None, lvs_name=None):
"""Destroy a logical volume store.

View File

@ -2041,6 +2041,24 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_delete)
# CLI wrapper: forward parsed arguments to the bdev_lvol_shallow_copy RPC.
def bdev_lvol_shallow_copy(args):
    rpc.lvol.bdev_lvol_shallow_copy(args.client,
                                    src_lvol_name=args.src_lvol_name,
                                    dst_bdev_name=args.dst_bdev_name)

p = subparsers.add_parser('bdev_lvol_shallow_copy', help="""Make a shallow copy of lvol over a given bdev.
lvol must be read only""")
p.add_argument('src_lvol_name', help='source lvol name')
p.add_argument('dst_bdev_name', help='destination bdev name')
p.set_defaults(func=bdev_lvol_shallow_copy)

# CLI wrapper: query copy progress and print the JSON result.
def bdev_lvol_shallow_copy_status(args):
    print_json(rpc.lvol.bdev_lvol_shallow_copy_status(args.client,
                                                      src_lvol_name=args.src_lvol_name))

p = subparsers.add_parser('bdev_lvol_shallow_copy_status', help='Get shallow copy status')
p.add_argument('src_lvol_name', help='source lvol name')
p.set_defaults(func=bdev_lvol_shallow_copy_status)
def bdev_lvol_delete_lvstore(args):
rpc.lvol.bdev_lvol_delete_lvstore(args.client,
uuid=args.uuid,

71
test/lvol/external_copy.sh Executable file
View File

@ -0,0 +1,71 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2023 SUSE LLC.
# All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/lvol/common.sh
source $rootdir/test/bdev/nbd_common.sh
# End-to-end check: build a thin lvol with two clusters written before a
# snapshot and two after, shallow-copy it to a malloc bdev, then verify the
# destination matches the lvol's own clusters and is zero elsewhere.
function test_shallow_copy_compare() {
	# Create lvs
	bs_malloc_name=$(rpc_cmd bdev_malloc_create 20 $MALLOC_BS)
	lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$bs_malloc_name" lvs_test)

	# Create lvol with 4 cluster
	lvol_size=$((LVS_DEFAULT_CLUSTER_SIZE_MB * 4))
	lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size" -t)

	# Fill second and fourth cluster of lvol
	nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
	dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=1 seek=1
	dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=1 seek=3
	nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0

	# Create snapshots of lvol bdev
	snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)

	# Fill first and third cluster of lvol
	# (these become the only clusters allocated to the lvol itself)
	nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
	dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=1
	dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=1 seek=2
	nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0

	# Set lvol as read only to perform the copy
	rpc_cmd bdev_lvol_set_read_only "$lvol_uuid"

	# Create external bdev to make a shallow copy of lvol on
	ext_malloc_name=$(rpc_cmd bdev_malloc_create "$lvol_size" $MALLOC_BS)

	# Make a shallow copy of lvol over external bdev
	rpc_cmd bdev_lvol_shallow_copy "$lvol_uuid" "$ext_malloc_name"

	# Create nbd devices of lvol and external bdev for comparison
	nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
	nbd_start_disks "$DEFAULT_RPC_ADDR" "$ext_malloc_name" /dev/nbd1

	# Compare lvol and external bdev in first and third cluster
	cmp -n "$LVS_DEFAULT_CLUSTER_SIZE" /dev/nbd0 /dev/nbd1
	cmp -n "$LVS_DEFAULT_CLUSTER_SIZE" /dev/nbd0 /dev/nbd1 "$((LVS_DEFAULT_CLUSTER_SIZE * 2))" "$((LVS_DEFAULT_CLUSTER_SIZE * 2))"

	# Check that second and fourth cluster of external bdev are zero filled
	# (snapshot-owned clusters must NOT be copied)
	cmp -n "$LVS_DEFAULT_CLUSTER_SIZE" /dev/nbd1 /dev/zero "$LVS_DEFAULT_CLUSTER_SIZE"
	cmp -n "$LVS_DEFAULT_CLUSTER_SIZE" /dev/nbd1 /dev/zero "$((LVS_DEFAULT_CLUSTER_SIZE * 3))"

	# Stop nbd devices
	nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
	nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
}
# Launch the SPDK target, ensure cleanup on interruption, and run the test.
$SPDK_BIN_DIR/spdk_tgt &
spdk_pid=$!
trap 'killprocess "$spdk_pid"; exit 1' SIGINT SIGTERM EXIT
waitforlisten $spdk_pid
# nbd is needed to expose bdevs as block devices for dd/cmp.
modprobe nbd

run_test "test_shallow_copy_compare" test_shallow_copy_compare

trap - SIGINT SIGTERM EXIT
killprocess $spdk_pid

View File

@ -20,6 +20,7 @@ run_test "lvol_rename" $rootdir/test/lvol/rename.sh
run_test "lvol_provisioning" $rootdir/test/lvol/thin_provisioning.sh
run_test "lvol_esnap" $rootdir/test/lvol/esnap/esnap
run_test "lvol_external_snapshot" $rootdir/test/lvol/external_snapshot.sh
run_test "lvol_external_copy" $rootdir/test/lvol/external_copy.sh
timing_exit basic
timing_exit lvol

View File

@ -893,6 +893,23 @@ spdk_lvs_notify_hotplug(const void *esnap_id, uint32_t id_len,
return g_bdev_is_missing;
}
/* Unit-test stub: mimics only the argument validation of the real
 * spdk_lvol_shallow_copy() and completes immediately with success. */
void
spdk_lvol_shallow_copy(struct spdk_lvol *lvol, struct spdk_bs_dev *ext_dev,
		       spdk_lvol_op_complete cb_fn, void *cb_arg)
{
	if (lvol == NULL) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	if (ext_dev == NULL) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	cb_fn(cb_arg, 0);
}
static void
lvol_store_op_complete(void *cb_arg, int lvserrno)
{
@ -933,6 +950,12 @@ vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno)
g_lvolerrno = lvolerrno;
}
/* Test completion callback: record the result for assertions. */
static void
vbdev_lvol_shallow_copy_complete(void *cb_arg, int lvolerrno)
{
	g_lvolerrno = lvolerrno;
}
static void
ut_lvs_destroy(void)
{
@ -1927,6 +1950,54 @@ ut_lvol_esnap_clone_bad_args(void)
g_base_bdev = NULL;
}
/* Unit test: exercise vbdev_lvol_shallow_copy() success and NULL-lvol
 * error paths on a freshly created lvstore/lvol. */
static void
ut_lvol_shallow_copy(void)
{
	struct spdk_lvol_store *lvs;
	int sz = 10;
	int rc;
	struct spdk_lvol *lvol = NULL;

	/* Lvol store is successfully created */
	rc = vbdev_lvs_create("bdev", "lvs", 0, LVS_CLEAR_WITH_UNMAP, 0,
			      lvol_store_op_with_handle_complete, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_lvserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
	CU_ASSERT(g_lvol_store->bs_dev != NULL);
	lvs = g_lvol_store;

	/* Successful lvol create */
	g_lvolerrno = -1;
	rc = vbdev_lvol_create(lvs, "lvol_sc", sz, false, LVOL_CLEAR_WITH_DEFAULT,
			       vbdev_lvol_create_complete,
			       NULL);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
	CU_ASSERT(g_lvolerrno == 0);
	lvol = g_lvol;

	/* Successful shallow copy */
	g_lvolerrno = -1;
	/* Allow the destination bdev to be opened by the stubbed bdev layer. */
	lvol_already_opened = false;
	vbdev_lvol_shallow_copy(lvol, "bdev_sc", vbdev_lvol_shallow_copy_complete, NULL);
	CU_ASSERT(g_lvolerrno == 0);

	/* Shallow copy error with NULL lvol */
	vbdev_lvol_shallow_copy(NULL, "", vbdev_lvol_shallow_copy_complete, NULL);
	CU_ASSERT(g_lvolerrno != 0);

	/* Successful lvol destroy */
	vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
	CU_ASSERT(g_lvol == NULL);

	/* Destroy lvol store */
	vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
	CU_ASSERT(g_lvserrno == 0);
	CU_ASSERT(g_lvol_store == NULL);
}
int
main(int argc, char **argv)
{
@ -1959,6 +2030,7 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, ut_lvol_seek);
CU_ADD_TEST(suite, ut_esnap_dev_create);
CU_ADD_TEST(suite, ut_lvol_esnap_clone_bad_args);
CU_ADD_TEST(suite, ut_lvol_shallow_copy);
allocate_threads(1);
set_thread(0);

View File

@ -13,6 +13,7 @@
#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "thread/thread.c"
#include "ext_dev.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
@ -8577,6 +8578,129 @@ blob_is_degraded(void)
g_blob->back_bs_dev = NULL;
}
/* bs_dev I/O completion used by the blob shallow-copy unit test: record
 * the status in the global for assertions. */
static void
bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
static void
blob_shallow_copy(void)
{
struct spdk_blob_store *bs = g_bs;
struct spdk_blob_opts blob_opts;
struct spdk_blob *blob;
spdk_blob_id blobid;
uint64_t num_clusters = 4;
struct spdk_bs_dev *ext_dev;
struct spdk_bs_dev_cb_args ext_args;
struct spdk_io_channel *bdev_ch, *blob_ch;
uint8_t buf1[DEV_BUFFER_BLOCKLEN];
uint8_t buf2[DEV_BUFFER_BLOCKLEN];
uint64_t io_units_per_cluster;
uint64_t offset;
blob_ch = spdk_bs_alloc_io_channel(bs);
SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
/* Set blob dimension and as thin provisioned */
ut_spdk_blob_opts_init(&blob_opts);
blob_opts.thin_provision = true;
blob_opts.num_clusters = num_clusters;
/* Create a blob */
blob = ut_blob_create_and_open(bs, &blob_opts);
SPDK_CU_ASSERT_FATAL(blob != NULL);
blobid = spdk_blob_get_id(blob);
io_units_per_cluster = bs_io_units_per_cluster(blob);
/* Write on cluster 2 and 4 of blob */
for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
}
for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
}
/* Make a snapshot over blob */
spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
/* Write on cluster 1 and 3 of blob */
for (offset = 0; offset < io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
}
for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
}
/* Create a spdk_bs_dev */
ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
/* Make a shallow copy of blob over bdev */
spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev, blob_op_complete, NULL);
CU_ASSERT(spdk_blob_get_shallow_copy_total_clusters(blob) == 2);
CU_ASSERT(spdk_blob_get_shallow_copy_copied_clusters(blob) == 0);
poll_threads();
CU_ASSERT(g_bserrno == 0);
/* Read from bdev */
/* Only cluster 1 and 3 must be filled */
bdev_ch = ext_dev->create_channel(ext_dev);
SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
ext_args.cb_fn = bs_dev_io_complete_cb;
for (offset = 0; offset < io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
}
for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
memset(buf1, 0, DEV_BUFFER_BLOCKLEN);
ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
}
for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
}
for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
memset(buf1, 0, DEV_BUFFER_BLOCKLEN);
ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
poll_threads();
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
}
/* Clean up */
ext_dev->destroy_channel(ext_dev, bdev_ch);
ext_dev->destroy(ext_dev);
spdk_bs_free_io_channel(blob_ch);
ut_blob_close_and_delete(bs, blob);
poll_threads();
}
static void
suite_bs_setup(void)
{
@ -8789,6 +8913,7 @@ main(int argc, char **argv)
CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
CU_ADD_TEST(suite_blob, blob_is_degraded);
CU_ADD_TEST(suite_bs, blob_shallow_copy);
allocate_threads(2);
set_thread(0);

View File

@ -0,0 +1,81 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2023 SUSE LLC.
* All rights reserved.
*/
#include "thread/thread_internal.h"
#include "spdk/blob.h"
#define EXT_DEV_BUFFER_SIZE (4 * 1024 * 1024)
uint8_t g_ext_dev_buffer[EXT_DEV_BUFFER_SIZE];
struct spdk_io_channel g_ext_io_channel;
/* Hand out the single static channel shared by all users of the stub device;
 * the dev argument carries no per-device channel state here. */
static struct spdk_io_channel *
ext_dev_create_channel(struct spdk_bs_dev *dev)
{
	(void)dev;

	return &g_ext_io_channel;
}
/* Nothing to release: the stub's channel is a static global. */
static void
ext_dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
	(void)dev;
	(void)channel;
}
/* Release the spdk_bs_dev structure allocated by init_ext_dev(). */
static void
ext_dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
/* Stub read: copy lba_count blocks out of the global backing buffer into
 * payload, then complete synchronously with bserrno == 0. */
static void
ext_dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	     uint64_t lba, uint32_t lba_count,
	     struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t start = lba * dev->blocklen;
	uint64_t nbytes = lba_count * dev->blocklen;

	SPDK_CU_ASSERT_FATAL(start + nbytes <= EXT_DEV_BUFFER_SIZE);
	if (nbytes > 0) {
		memcpy(payload, g_ext_dev_buffer + start, nbytes);
	}
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
/* Stub write: copy lba_count blocks from payload into the global backing
 * buffer, then complete synchronously with bserrno == 0. */
static void
ext_dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	offset = lba * dev->blocklen;
	length = lba_count * dev->blocklen;
	SPDK_CU_ASSERT_FATAL(offset + length <= EXT_DEV_BUFFER_SIZE);
	/* Guard zero-length writes like ext_dev_read() does: memcpy with a
	 * NULL payload is undefined behavior even when length == 0. */
	if (length > 0) {
		memcpy(&g_ext_dev_buffer[offset], payload, length);
	}
	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
/* Allocate and populate an spdk_bs_dev stub backed by g_ext_dev_buffer.
 * Ownership of the returned structure passes to the caller, who releases
 * it through the dev->destroy() callback. */
static struct spdk_bs_dev *
init_ext_dev(uint64_t blockcnt, uint32_t blocklen)
{
	struct spdk_bs_dev *bs_dev;

	bs_dev = calloc(1, sizeof(*bs_dev));
	SPDK_CU_ASSERT_FATAL(bs_dev != NULL);

	bs_dev->blockcnt = blockcnt;
	bs_dev->blocklen = blocklen;
	bs_dev->create_channel = ext_dev_create_channel;
	bs_dev->destroy_channel = ext_dev_destroy_channel;
	bs_dev->destroy = ext_dev_destroy;
	bs_dev->read = ext_dev_read;
	bs_dev->write = ext_dev_write;

	return bs_dev;
}

View File

@ -59,6 +59,7 @@ int g_resize_rc;
int g_inflate_rc;
int g_remove_rc;
bool g_lvs_rename_blob_open_error = false;
bool g_blob_read_only = false;
struct spdk_lvol_store *g_lvol_store;
struct spdk_lvol *g_lvol;
spdk_blob_id g_blobid = 1;
@ -136,7 +137,7 @@ spdk_bs_iter_first(struct spdk_blob_store *bs,
uint64_t
spdk_blob_get_num_clusters(struct spdk_blob *blob)
{
	/* Stub: report a single allocated cluster so size validation in the
	 * shallow-copy path has a non-zero lvol size to compare against the
	 * destination device. (Removed the stale unreachable `return 0;`
	 * diff leftover that preceded this line.) */
	return 1;
}
void
@ -247,6 +248,14 @@ spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
return blob->thin_provisioned;
}
/* Stub for the blobstore shallow-copy entry point: performs no copy and
 * immediately reports success through the completion callback, so the lvol
 * layer under test sees a successful operation. */
void
spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			  spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
			  spdk_blob_op_complete cb_fn, void *cb_arg)
{
	cb_fn(cb_arg, 0);
}
DEFINE_STUB(spdk_bs_get_page_size, uint64_t, (struct spdk_blob_store *bs), BS_PAGE_SIZE);
int
@ -457,6 +466,12 @@ spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
}
/* Stub: read-only state is driven by the test via the g_blob_read_only
 * global, letting tests exercise both the read-only and writable paths. */
bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	return g_blob_read_only;
}
void
spdk_bs_create_blob(struct spdk_blob_store *bs,
spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
@ -3298,6 +3313,75 @@ lvol_get_by(void)
free_dev(&dev2);
}
/* Exercise spdk_lvol_shallow_copy(): the happy path plus each argument
 * validation failure (NULL lvol, NULL ext_dev, destination too small,
 * writable lvol). g_lvserrno is reset before every call so each assertion
 * checks the outcome of that call, not a stale value. */
static void
lvol_shallow_copy(void)
{
	struct lvol_ut_bs_dev bs_dev;
	struct spdk_lvs_opts opts;
	struct spdk_bs_dev ext_dev;
	int rc = 0;

	init_dev(&bs_dev);

	/* Destination device exactly one cluster large — valid for a
	 * one-cluster lvol (see the spdk_blob_get_num_clusters() stub). */
	ext_dev.blocklen = DEV_BUFFER_BLOCKLEN;
	ext_dev.blockcnt = BS_CLUSTER_SIZE / DEV_BUFFER_BLOCKLEN;

	spdk_lvs_opts_init(&opts);
	snprintf(opts.name, sizeof(opts.name), "lvs");

	g_lvserrno = -1;
	rc = spdk_lvs_init(&bs_dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_lvserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);

	spdk_lvol_create(g_lvol_store, "lvol", BS_CLUSTER_SIZE, false, LVOL_CLEAR_WITH_DEFAULT,
			 lvol_op_with_handle_complete, NULL);
	CU_ASSERT(g_lvserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_lvol != NULL);

	/* Successful shallow copy */
	g_lvserrno = -1;
	g_blob_read_only = true;
	spdk_lvol_shallow_copy(g_lvol, &ext_dev, op_complete, NULL);
	CU_ASSERT(g_lvserrno == 0);

	/* Shallow copy with null lvol */
	g_lvserrno = 0;
	spdk_lvol_shallow_copy(NULL, &ext_dev, op_complete, NULL);
	CU_ASSERT(g_lvserrno != 0);

	/* Shallow copy with null ext_dev */
	g_lvserrno = 0;
	spdk_lvol_shallow_copy(g_lvol, NULL, op_complete, NULL);
	CU_ASSERT(g_lvserrno != 0);

	/* Shallow copy with invalid (too small) ext_dev size */
	g_lvserrno = 0;
	ext_dev.blockcnt = 1;
	spdk_lvol_shallow_copy(g_lvol, &ext_dev, op_complete, NULL);
	CU_ASSERT(g_lvserrno != 0);

	/* Shallow copy with writable lvol. Restore a valid device size first
	 * so the only possible reason for failure is the lvol not being
	 * read only. */
	ext_dev.blockcnt = BS_CLUSTER_SIZE / DEV_BUFFER_BLOCKLEN;
	g_lvserrno = 0;
	g_blob_read_only = false;
	spdk_lvol_shallow_copy(g_lvol, &ext_dev, op_complete, NULL);
	CU_ASSERT(g_lvserrno != 0);

	spdk_lvol_close(g_lvol, op_complete, NULL);
	CU_ASSERT(g_lvserrno == 0);
	spdk_lvol_destroy(g_lvol, op_complete, NULL);
	CU_ASSERT(g_lvserrno == 0);

	g_lvserrno = -1;
	rc = spdk_lvs_unload(g_lvol_store, op_complete, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_lvserrno == 0);
	g_lvol_store = NULL;

	free_dev(&bs_dev);

	/* Make sure that all references to the io_channel were closed after
	 * the shallow copy call */
	CU_ASSERT(g_io_channel == NULL);
}
int
main(int argc, char **argv)
{
@ -3344,6 +3428,7 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, lvol_esnap_missing);
CU_ADD_TEST(suite, lvol_esnap_hotplug);
CU_ADD_TEST(suite, lvol_get_by);
CU_ADD_TEST(suite, lvol_shallow_copy);
allocate_threads(1);
set_thread(0);