blob: Add functions to find [un]allocated io_unit

Starting from a given offset, these functions search for the first
io_unit that belongs to an allocated cluster or the first io_unit
that belongs to an unallocated cluster, respectively.

Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
Change-Id: I0c632e2b3dfd2e96aa22e21796e25a36f2f55f9f
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14360
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
Author: Damiano, 2022-09-05 14:51:01 +02:00
Committed by: Tomasz Zawadzki
parent ddf5a8da90
commit d8a3dee1c1
4 changed files with 135 additions and 0 deletions
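
Before the diff itself, a minimal usage sketch (not part of this change) of how a caller might walk the allocated extents of an already-open, thin-provisioned blob with the two new functions, SEEK_DATA/SEEK_HOLE style. The function name dump_allocated_extents and the surrounding setup are illustrative only.

/*
 * Illustrative usage only: print the allocated io_unit ranges of an
 * already-open blob. Assumes the blobstore and blob were opened elsewhere.
 */
#include <inttypes.h>
#include <stdio.h>

#include "spdk/blob.h"

static void
dump_allocated_extents(struct spdk_blob *blob)
{
	uint64_t num_io_units = spdk_blob_get_num_io_units(blob);
	uint64_t start = 0, end;

	while (start < num_io_units) {
		/* First allocated io_unit at or after 'start'. */
		start = spdk_blob_get_next_allocated_io_unit(blob, start);
		if (start == UINT64_MAX) {
			break;	/* no allocated data left */
		}

		/* End of this allocated run: next unallocated io_unit, or blob end. */
		end = spdk_blob_get_next_unallocated_io_unit(blob, start);
		if (end == UINT64_MAX) {
			end = num_io_units;
		}

		printf("allocated extent: io_units [%" PRIu64 ", %" PRIu64 ")\n", start, end);
		start = end;
	}
}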


@@ -428,6 +428,34 @@ uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob);
 */
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob);

/**
 * Get the next allocated io_unit.
 *
 * Starting at 'offset' io_units into the blob, returns the offset of the
 * first allocated io_unit found. If 'offset' itself points to an allocated
 * io_unit, that same offset is returned.
 *
 * \param blob Blob to query.
 * \param offset Offset, in io_units, from the beginning of the blob.
 *
 * \return Offset in io_units, or UINT64_MAX if no allocated io_unit is found.
 */
uint64_t spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset);

/**
 * Get the next unallocated io_unit.
 *
 * Starting at 'offset' io_units into the blob, returns the offset of the
 * first unallocated io_unit found. If 'offset' itself points to an
 * unallocated io_unit, that same offset is returned.
 *
 * \param blob Blob to query.
 * \param offset Offset, in io_units, from the beginning of the blob.
 *
 * \return Offset in io_units, or UINT64_MAX if no unallocated io_unit is found.
 */
uint64_t spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset);

struct spdk_blob_xattr_opts {
	/* Number of attributes */
	size_t count;

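To make the documented contract above concrete, a small sketch (not part of the diff) of the values one would expect for a blob in which only cluster 0 has been written; 'blob' is assumed to be already open and 'io_units_per_cluster' is passed in by the caller purely for illustration.

/* Illustrative check only: verify the documented contract of the two calls
 * for a blob where only cluster 0 is allocated. */
#include <assert.h>
#include <stdint.h>

#include "spdk/blob.h"

static void
check_seek_contract(struct spdk_blob *blob, uint64_t io_units_per_cluster)
{
	/* An offset that is itself allocated is returned unchanged. */
	assert(spdk_blob_get_next_allocated_io_unit(blob, 0) == 0);

	/* The first hole starts at the first io_unit of the next (unallocated) cluster. */
	assert(spdk_blob_get_next_unallocated_io_unit(blob, 0) == io_units_per_cluster);

	/* No allocated io_unit past the first cluster, so UINT64_MAX is returned. */
	assert(spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster) == UINT64_MAX);
}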

@@ -5593,6 +5593,34 @@ spdk_blob_get_num_clusters(struct spdk_blob *blob)
	return blob->active.num_clusters;
}

static uint64_t
blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
{
	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);

	while (offset < blob_io_unit_num) {
		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
			return offset;
		}
		/* Allocation is tracked per cluster, so skip ahead to the next
		 * cluster boundary instead of checking every io_unit. */
		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
	}

	return UINT64_MAX;
}
uint64_t
spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
{
	return blob_find_io_unit(blob, offset, true);
}

uint64_t
spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
{
	return blob_find_io_unit(blob, offset, false);
}

/* START spdk_bs_create_blob */
static void

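A note on the search above: because allocation is tracked per cluster, blob_find_io_unit() inspects at most one io_unit per cluster and then jumps to the next cluster boundary via bs_num_io_units_to_cluster_boundary(), so the scan is proportional to the number of clusters rather than the number of io_units. A rough standalone model of that stepping follows; the helper name and the cluster_allocated array are hypothetical, not the real blobstore internals.

#include <stdbool.h>
#include <stdint.h>

/* Standalone model of the cluster-granularity scan; 'cluster_allocated'
 * stands in for the blob's cluster map and is purely illustrative. */
static uint64_t
find_io_unit_model(const bool *cluster_allocated, uint64_t num_clusters,
		   uint64_t io_units_per_cluster, uint64_t offset, bool want_allocated)
{
	uint64_t num_io_units = num_clusters * io_units_per_cluster;

	while (offset < num_io_units) {
		uint64_t cluster = offset / io_units_per_cluster;

		if (cluster_allocated[cluster] == want_allocated) {
			return offset;	/* first matching io_unit at or after 'offset' */
		}

		/* Jump to the first io_unit of the next cluster. */
		offset = (cluster + 1) * io_units_per_cluster;
	}

	return UINT64_MAX;
}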

@@ -20,6 +20,8 @@
	spdk_blob_get_num_pages;
	spdk_blob_get_num_io_units;
	spdk_blob_get_num_clusters;
	spdk_blob_get_next_allocated_io_unit;
	spdk_blob_get_next_unallocated_io_unit;
	spdk_blob_opts_init;
	spdk_bs_create_blob_ext;
	spdk_bs_create_blob;


@@ -7316,6 +7316,82 @@ blob_decouple_snapshot(void)
	}
}

static void
blob_seek_io_unit(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload[10 * 4096];
	uint64_t offset;
	uint64_t io_unit, io_units_per_cluster;

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Write at the beginning of first cluster */
	offset = 0;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == offset);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == io_units_per_cluster);

	/* Write in the middle of third cluster */
	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
	CU_ASSERT(io_unit == 2 * io_units_per_cluster);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
	CU_ASSERT(io_unit == 3 * io_units_per_cluster);

	/* Write at the end of last cluster */
	offset = 5 * io_units_per_cluster - 1;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
	CU_ASSERT(io_unit == 4 * io_units_per_cluster);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
	CU_ASSERT(io_unit == UINT64_MAX);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
suite_bs_setup(void)
{
@@ -7491,6 +7567,7 @@ main(int argc, char **argv)
	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
	CU_ADD_TEST(suite_bs, blob_persist_test);
	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
	CU_ADD_TEST(suite_bs, blob_seek_io_unit);

	allocate_threads(2);
	set_thread(0);