lvol/blob: get shallow copy status of ended ops

The shallow copy status RPC now returns information about both ongoing
and already-ended operations.

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
This commit is contained in:
Damiano Cipriani 2023-06-09 17:35:17 +02:00 committed by Shuo Wu
parent 59f61e6fd0
commit 575a9a8669
5 changed files with 60 additions and 30 deletions

View File

@ -10003,6 +10003,7 @@ Example response:
Make a shallow copy of lvol over a given bdev. Only clusters allocated to the lvol will be written to the bdev.
Must have:
* lvol read only
* lvol size smaller than bdev size
* lvstore block size a multiple of bdev size
@ -10046,8 +10047,8 @@ Get shallow copy status
#### Result
This RPC reports if a shallow copy is still in progress and operation's advance state in the format
_number_of_copied_clusters/total_clusters_to_copy_
This RPC reports the state of a shallow copy operation, a description in case of error, and
the operation's progress in the format _number_of_copied_clusters/total_clusters_to_copy_.
#### Parameters
@ -10077,8 +10078,8 @@ Example response:
"jsonrpc": "2.0",
"id": 1,
"result": {
"in_progress": true,
"status": "2/4"
"state": "in progress",
"progress": "2/4"
}
}
~~~

View File

@ -518,27 +518,39 @@ uint64_t spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t
/**
* Get the number of copied clusters of a shallow copy operation
* If a shallow copy of the blob is in progress, this functions returns the number of already
* copied clusters.
* If a shallow copy of the blob is in progress or it is ended, this function returns
* the number of copied clusters.
*
* \param blob Blob struct to query.
*
* \return cluster index or UINT64_MAX if no shallow copy is in progress
* \return number of copied clusters.
*/
uint64_t spdk_blob_get_shallow_copy_copied_clusters(struct spdk_blob *blob);
/**
* Get the total number of clusters to be copied in a shallow copy operation
* If a shallow copy of the blob is in progress, this functions returns the total number
* of cluster involved in the operation.
* If a shallow copy of the blob is in progress or it is ended, this function returns
* the total number of clusters to be copied.
*
* \param blob Blob struct to query.
*
* \return total number, 0 if no shallow copy is in progress
* \return total number of clusters.
*/
uint64_t spdk_blob_get_shallow_copy_total_clusters(struct spdk_blob *blob);
/**
* Get the result of the last shallow copy operation
* If a shallow copy of the blob is in progress or it is ended, this function returns
* the result of the operation.
*
* \param blob Blob struct to query.
*
* \return 0 on success, negative errno on failure.
*/
int spdk_blob_get_shallow_copy_result(struct spdk_blob *blob);
struct spdk_blob_xattr_opts {
/* Number of attributes */
size_t count;

View File

@ -304,8 +304,7 @@ blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
blob->parent_id = SPDK_BLOBID_INVALID;
blob->state = SPDK_BLOB_STATE_DIRTY;
blob->u.shallow_copy.copied_clusters_number = 0;
blob->u.shallow_copy.num_clusters_to_copy = 0;
blob->u.shallow_copy.bserrno = 1;
blob->extent_rle_found = false;
blob->extent_table_found = false;
blob->active.num_pages = 1;
@ -5888,11 +5887,7 @@ spdk_blob_get_shallow_copy_copied_clusters(struct spdk_blob *blob)
{
assert(blob != NULL);
if (blob->u.shallow_copy.num_clusters_to_copy > 0) {
return blob->u.shallow_copy.copied_clusters_number;
} else {
return UINT64_MAX;
}
return blob->u.shallow_copy.copied_clusters_number;
}
uint64_t
@ -5903,6 +5898,14 @@ spdk_blob_get_shallow_copy_total_clusters(struct spdk_blob *blob)
return blob->u.shallow_copy.num_clusters_to_copy;
}
/* Return the stored result of the blob's shallow copy operation.
 * Reads blob->u.shallow_copy.bserrno: 0 on success, negative errno on
 * failure; positive (initialized to 1 in blob_alloc) presumably means no
 * shallow copy has run yet — NOTE(review): confirm that convention. */
int
spdk_blob_get_shallow_copy_result(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return blob->u.shallow_copy.bserrno;
}
/* START spdk_bs_create_blob */
static void
@ -6971,6 +6974,7 @@ static void
bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
{
struct shallow_copy_ctx *ctx = cb_arg;
struct spdk_blob *_blob = ctx->blob;
struct spdk_bs_cpl *cpl = &ctx->cpl;
if (bserrno != 0) {
@ -6980,6 +6984,8 @@ bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
}
}
_blob->u.shallow_copy.bserrno = ctx->bserrno;
ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel);
spdk_free(ctx->read_buff);
@ -6997,9 +7003,8 @@ bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, in
if (bserrno != 0) {
SPDK_ERRLOG("Shallow copy ext dev write error %d\n", bserrno);
ctx->bserrno = bserrno;
_blob->u.shallow_copy.bserrno = bserrno;
_blob->locked_operation_in_progress = false;
_blob->u.shallow_copy.copied_clusters_number = 0;
_blob->u.shallow_copy.num_clusters_to_copy = 0;
spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
return;
}
@ -7020,9 +7025,8 @@ bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
if (bserrno != 0) {
SPDK_ERRLOG("Shallow copy blob read error %d\n", bserrno);
ctx->bserrno = bserrno;
_blob->u.shallow_copy.bserrno = bserrno;
_blob->locked_operation_in_progress = false;
_blob->u.shallow_copy.copied_clusters_number = 0;
_blob->u.shallow_copy.num_clusters_to_copy = 0;
spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
return;
}
@ -7046,9 +7050,8 @@ bs_shallow_copy_cluster_find_next(void *cb_arg, int bserrno)
if (bserrno != 0) {
SPDK_ERRLOG("Shallow copy bdev write error %d\n", bserrno);
ctx->bserrno = bserrno;
_blob->u.shallow_copy.bserrno = bserrno;
_blob->locked_operation_in_progress = false;
_blob->u.shallow_copy.copied_clusters_number = 0;
_blob->u.shallow_copy.num_clusters_to_copy = 0;
spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
return;
}
@ -7067,8 +7070,6 @@ bs_shallow_copy_cluster_find_next(void *cb_arg, int bserrno)
bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
} else {
_blob->u.shallow_copy.copied_clusters_number = 0;
_blob->u.shallow_copy.num_clusters_to_copy = 0;
_blob->locked_operation_in_progress = false;
spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
}
@ -7118,6 +7119,10 @@ bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno
_blob->locked_operation_in_progress = true;
_blob->u.shallow_copy.copied_clusters_number = 0;
_blob->u.shallow_copy.num_clusters_to_copy = 0;
_blob->u.shallow_copy.bserrno = 0;
for (i = 0; i < _blob->active.num_clusters; i++) {
if (_blob->active.clusters[i] != 0) {
_blob->u.shallow_copy.num_clusters_to_copy++;

View File

@ -153,6 +153,7 @@ struct spdk_blob {
struct {
uint64_t num_clusters_to_copy;
uint64_t copied_clusters_number;
int bserrno;
} shallow_copy;
} u;
};

View File

@ -1447,7 +1447,8 @@ rpc_bdev_lvol_shallow_copy_status(struct spdk_jsonrpc_request *request,
struct spdk_bdev *src_lvol_bdev;
struct spdk_lvol *src_lvol;
struct spdk_json_write_ctx *w;
uint64_t cluster_index, total_clusters;
uint64_t copied_clusters, total_clusters;
int result;
SPDK_INFOLOG(lvol_rpc, "Shallow copy status\n");
@ -1474,16 +1475,26 @@ rpc_bdev_lvol_shallow_copy_status(struct spdk_jsonrpc_request *request,
goto cleanup;
}
cluster_index = spdk_blob_get_shallow_copy_copied_clusters(src_lvol->blob);
copied_clusters = spdk_blob_get_shallow_copy_copied_clusters(src_lvol->blob);
total_clusters = spdk_blob_get_shallow_copy_total_clusters(src_lvol->blob);
result = spdk_blob_get_shallow_copy_result(src_lvol->blob);
w = spdk_jsonrpc_begin_result(request);
spdk_json_write_object_begin(w);
spdk_json_write_named_bool(w, "in_progress", total_clusters > 0);
if (total_clusters > 0) {
spdk_json_write_named_string_fmt(w, "status", "%lu/%lu", cluster_index, total_clusters);
spdk_json_write_named_string_fmt(w, "progress", "%lu/%lu", copied_clusters, total_clusters);
if (result > 0) {
spdk_json_write_named_string(w, "state", "none");
} else if (copied_clusters < total_clusters && result == 0) {
spdk_json_write_named_string(w, "state", "in progress");
} else if (copied_clusters == total_clusters && result == 0) {
spdk_json_write_named_string(w, "state", "complete");
} else {
spdk_json_write_named_string(w, "state", "error");
spdk_json_write_named_string(w, "error", spdk_strerror(-result));
}
spdk_json_write_object_end(w);
spdk_jsonrpc_end_result(request, w);