blob: cluster allocation/deallocation for thin provisioned blob
Signed-off-by: Maciej Szwed <maciej.szwed@intel.com>
Change-Id: Ib3470fbac49e92308ed14e20ccde6655354f2580
Reviewed-on: https://review.gerrithub.io/389577
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Commit 65fe29f8dd (parent 85a61ea6f0)
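In short, a blob created with opts.thin_provision = true no longer has clusters reserved up front: resizing only grows its cluster table with zero entries, and the metadata, release and unmap paths below learn to treat a zero entry as "no backing cluster". A minimal sketch of how an application opts in, using only calls that appear in the new unit test at the bottom of this diff; the create_done callback and its wiring are illustrative, not part of the patch:

#include "spdk/blob.h"

/* Illustrative completion callback; follows the (cb_arg, blobid, bserrno)
 * pattern used by blob_op_with_id_complete in the unit test below. */
static void
create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	spdk_blob_id *out = cb_arg;

	*out = (bserrno == 0) ? blobid : SPDK_BLOBID_INVALID;
}

/* Create a thin provisioned blob: the free cluster count stays unchanged,
 * and a later spdk_blob_resize() will not claim clusters either. */
static void
create_thin_blob(struct spdk_blob_store *bs, spdk_blob_id *out)
{
	struct spdk_blob_opts opts;

	spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	spdk_bs_create_blob_ext(bs, &opts, create_done, out);
}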
@@ -271,8 +271,14 @@ _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_dat
 		for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
 			for (j = 0; j < desc_extent->extents[i].length; j++) {
-				blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
-						desc_extent->extents[i].cluster_idx + j);
+				if (desc_extent->extents[i].cluster_idx != 0) {
+					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
+							desc_extent->extents[i].cluster_idx + j);
+				} else if (spdk_blob_is_thin_provisioned(blob)) {
+					blob->active.clusters[blob->active.num_clusters++] = 0;
+				} else {
+					return -EINVAL;
+				}
 			}
 		}
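The parse change above establishes the in-memory convention this feature relies on: an extent with cluster_idx == 0 describes clusters that have no physical backing yet, so blob->active.clusters[] stores 0 for them, and for a blob that is not thin provisioned such metadata is invalid, hence -EINVAL. A self-contained sketch of that convention; cluster_is_allocated is a hypothetical helper, and LBA 0 is assumed to be usable as a sentinel because the blobstore keeps its super block at the start of the device, so no data cluster starts there:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not part of the patch: a zero entry in the blob's
 * cluster table marks a logical cluster with no physical cluster behind it. */
static bool
cluster_is_allocated(const uint64_t *cluster_lbas, uint64_t cluster_idx)
{
	return cluster_lbas[cluster_idx] != 0;
}

int
main(void)
{
	/* A 4-cluster thin blob where only cluster 2 has been written so far. */
	uint64_t clusters[4] = { 0, 0, 256, 0 };

	for (uint64_t i = 0; i < 4; i++) {
		printf("cluster %" PRIu64 ": %s\n", i,
		       cluster_is_allocated(clusters, i) ? "allocated" : "unallocated");
	}
	return 0;
}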
@@ -795,7 +801,10 @@ _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int
 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
 
-		_spdk_bs_release_cluster(bs, cluster_num);
+		/* Nothing to release if it was not allocated */
+		if (blob->active.clusters[i] != 0) {
+			_spdk_bs_release_cluster(bs, cluster_num);
+		}
 	}
 
 	if (blob->active.num_clusters == 0) {
@@ -836,7 +845,7 @@ _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bse
 		uint64_t next_lba = blob->active.clusters[i];
 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
 
-		if ((lba + lba_count) == next_lba) {
+		if (next_lba > 0 && (lba + lba_count) == next_lba) {
 			/* This cluster is contiguous with the previous one. */
 			lba_count += next_lba_count;
 			continue;
@@ -853,7 +862,11 @@ _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bse
 
 		/* Start building the next batch */
 		lba = next_lba;
-		lba_count = next_lba_count;
+		if (next_lba > 0) {
+			lba_count = next_lba_count;
+		} else {
+			lba_count = 0;
+		}
 	}
 
 	/* If we ended with a contiguous set of LBAs, send the unmap now */
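Both unmap tweaks above serve the same purpose: a zero entry must neither be counted as contiguous with the previous cluster nor start a new batch of LBAs to unmap. A standalone sketch of the batching idea under that rule; unmap_range stands in for the real batch-unmap call and is purely illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for issuing one unmap covering lba..lba+lba_count-1. */
static void
unmap_range(uint64_t lba, uint64_t lba_count)
{
	printf("unmap lba=%" PRIu64 " count=%" PRIu64 "\n", lba, lba_count);
}

/* Batch contiguous, allocated clusters into as few unmaps as possible.
 * Entries equal to 0 are unallocated (thin provisioning) and are skipped. */
static void
unmap_clusters(const uint64_t *clusters, uint64_t num_clusters, uint64_t lba_per_cluster)
{
	uint64_t lba = 0;
	uint64_t lba_count = 0;

	for (uint64_t i = 0; i < num_clusters; i++) {
		uint64_t next_lba = clusters[i];

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* Contiguous with the previous cluster: grow the batch. */
			lba_count += lba_per_cluster;
			continue;
		}

		/* Flush the batch built so far, if any. */
		if (lba_count > 0) {
			unmap_range(lba, lba_count);
		}

		/* Start the next batch; an unallocated cluster starts nothing. */
		lba = next_lba;
		lba_count = (next_lba > 0) ? lba_per_cluster : 0;
	}

	if (lba_count > 0) {
		unmap_range(lba, lba_count);
	}
}

int
main(void)
{
	/* Clusters at LBAs 256 and 512 are contiguous; zero entries are holes. */
	uint64_t clusters[] = { 256, 512, 0, 1024, 0 };

	unmap_clusters(clusters, 5, 256);
	return 0;
}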
@@ -1024,14 +1037,16 @@ _spdk_resize_blob(struct spdk_blob_data *blob, uint64_t sz)
 	 * and another to actually claim them.
 	 */
 
-	lfc = 0;
-	for (i = num_clusters; i < sz; i++) {
-		lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
-		if (lfc >= bs->total_clusters) {
-			/* No more free clusters. Cannot satisfy the request */
-			return -ENOSPC;
+	if (spdk_blob_is_thin_provisioned(blob) == false) {
+		lfc = 0;
+		for (i = num_clusters; i < sz; i++) {
+			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
+			if (lfc >= bs->total_clusters) {
+				/* No more free clusters. Cannot satisfy the request */
+				return -ENOSPC;
+			}
+			lfc++;
 		}
-		lfc++;
 	}
 
 	if (sz > num_clusters) {
@@ -1042,19 +1057,23 @@ _spdk_resize_blob(struct spdk_blob_data *blob, uint64_t sz)
 		if (sz > 0 && tmp == NULL) {
 			return -ENOMEM;
 		}
 		memset(tmp + blob->active.cluster_array_size, 0,
 		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
 		blob->active.clusters = tmp;
 		blob->active.cluster_array_size = sz;
 	}
 
 	blob->state = SPDK_BLOB_STATE_DIRTY;
 
-	lfc = 0;
-	for (i = num_clusters; i < sz; i++) {
-		lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
-		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", lfc, blob->id);
-		_spdk_bs_claim_cluster(bs, lfc);
-		blob->active.clusters[i] = _spdk_bs_cluster_to_lba(bs, lfc);
-		lfc++;
+	if (spdk_blob_is_thin_provisioned(blob) == false) {
+		lfc = 0;
+		for (i = num_clusters; i < sz; i++) {
+			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
+			SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", lfc, blob->id);
+			_spdk_bs_claim_cluster(bs, lfc);
+			blob->active.clusters[i] = _spdk_bs_cluster_to_lba(bs, lfc);
+			lfc++;
+		}
 	}
 
 	blob->active.num_clusters = sz;
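With both loops in _spdk_resize_blob gated on spdk_blob_is_thin_provisioned(blob) == false, resizing a thin blob only grows the (zero-filled) cluster table and bumps num_clusters; no clusters are reserved or claimed, which is what the new blob_thin_prov_alloc test checks via spdk_bs_free_cluster_count(). A condensed, self-contained sketch of that behavior; the structure and the flat next_free_lba counter are simplified stand-ins, not the SPDK implementation:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a blob's active cluster state. */
struct toy_blob {
	bool thin_provisioned;
	uint64_t num_clusters;
	uint64_t cluster_array_size;
	uint64_t *clusters;	/* starting LBA per cluster, 0 = unallocated */
};

/* Resize to sz clusters. Thick blobs claim backing for every new cluster;
 * thin blobs leave the new entries at 0 until they are actually written. */
static int
toy_resize(struct toy_blob *blob, uint64_t sz, uint64_t *next_free_lba,
	   uint64_t lba_per_cluster)
{
	if (sz > blob->cluster_array_size) {
		uint64_t *tmp = realloc(blob->clusters, sizeof(uint64_t) * sz);

		if (tmp == NULL) {
			return -ENOMEM;
		}
		/* The new tail of the table starts out unallocated, exactly
		 * like the memset in the hunk above. */
		memset(tmp + blob->cluster_array_size, 0,
		       sizeof(uint64_t) * (sz - blob->cluster_array_size));
		blob->clusters = tmp;
		blob->cluster_array_size = sz;
	}

	if (!blob->thin_provisioned) {
		for (uint64_t i = blob->num_clusters; i < sz; i++) {
			blob->clusters[i] = *next_free_lba;
			*next_free_lba += lba_per_cluster;
		}
	}

	blob->num_clusters = sz;
	return 0;
}

int
main(void)
{
	struct toy_blob thin = { .thin_provisioned = true };
	uint64_t next_free_lba = 256;

	toy_resize(&thin, 5, &next_free_lba, 256);
	/* next_free_lba is still 256 and all 5 entries remain 0. */
	free(thin.clusters);
	return 0;
}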
@@ -242,6 +242,8 @@ struct spdk_blob_md_descriptor_extent {
 #define SPDK_BLOB_DATA_RO_FLAGS_MASK	SPDK_BLOB_READ_ONLY
 #define SPDK_BLOB_MD_RO_FLAGS_MASK	0
 
+#define spdk_blob_is_thin_provisioned(blob) (blob->invalid_flags & SPDK_BLOB_THIN_PROV)
+
 struct spdk_blob_md_descriptor_flags {
 	uint8_t type;
 	uint32_t length;
@@ -2563,6 +2563,105 @@ blob_set_xattrs(void)
 
 }
 
+static void
+blob_thin_prov_alloc(void)
+{
+	struct spdk_blob_store *bs;
+	struct spdk_bs_dev *dev;
+	struct spdk_blob *blob;
+	struct spdk_blob_data *blob_data;
+	struct spdk_blob_opts opts;
+	spdk_blob_id blobid;
+	uint64_t free_clusters;
+	int rc;
+
+	dev = init_dev();
+
+	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+	bs = g_bs;
+	free_clusters = spdk_bs_free_cluster_count(bs);
+
+	/* Set blob as thin provisioned */
+	spdk_blob_opts_init(&opts);
+	opts.thin_provision = true;
+
+	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+	blobid = g_blobid;
+
+	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+	blob = g_blob;
+	blob_data = __blob_to_data(blob);
+
+	CU_ASSERT(blob_data->active.num_clusters == 0);
+	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+	rc = spdk_blob_resize(blob, 5);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+	CU_ASSERT(blob_data->active.num_clusters == 5);
+	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+	/* Shrink the blob to 3 clusters - still unallocated */
+	rc = spdk_blob_resize(blob, 3);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+	CU_ASSERT(blob_data->active.num_clusters == 3);
+	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+	spdk_blob_sync_md(blob, blob_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	/* Sync must not change anything */
+	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+	CU_ASSERT(blob_data->active.num_clusters == 3);
+	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+	spdk_blob_close(blob, blob_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+
+	/* Unload the blob store */
+	spdk_bs_unload(g_bs, bs_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	g_bs = NULL;
+	g_blob = NULL;
+	g_blobid = 0;
+
+	/* Load an existing blob store */
+	dev = init_dev();
+	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+	bs = g_bs;
+
+	spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	CU_ASSERT(g_blob != NULL);
+	blob = g_blob;
+	blob_data = __blob_to_data(blob);
+
+	/* Check that clusters allocation and size is still the same */
+	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+	CU_ASSERT(blob_data->active.num_clusters == 3);
+
+	spdk_blob_close(blob, blob_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+
+	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+
+	spdk_bs_unload(g_bs, bs_op_complete, NULL);
+	CU_ASSERT(g_bserrno == 0);
+	g_bs = NULL;
+}
+
 int main(int argc, char **argv)
 {
 	CU_pSuite suite = NULL;
@@ -2610,7 +2709,8 @@ int main(int argc, char **argv)
 		CU_add_test(suite, "blob_dirty_shutdown", blob_dirty_shutdown) == NULL ||
 		CU_add_test(suite, "blob_flags", blob_flags) == NULL ||
 		CU_add_test(suite, "bs_version", bs_version) == NULL ||
-		CU_add_test(suite, "blob_set_xattrs", blob_set_xattrs) == NULL
+		CU_add_test(suite, "blob_set_xattrs", blob_set_xattrs) == NULL ||
+		CU_add_test(suite, "blob_thin_prov_alloc", blob_thin_prov_alloc) == NULL
 	) {
 		CU_cleanup_registry();
 		return CU_get_error();