lib/blob: use use_extent_table instead of NULL from extent_page

Right now the output from _spdk_bs_cluster_to_extent_page()
is used to determine whether the extent_table is used at all.
If a NULL pointer was returned, it meant that the extent table
was not allocated, even though the code might suggest it was
merely checking whether we overran the array.

To make it more obvious, _spdk_bs_cluster_to_extent_page()
now only asserts that the extent_table_id is within bounds.

blob->use_extent_table is now always used to determine the
serialization path.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I9d2630645213539bae5cd1d72e5f9b878f53c2bc
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/482599
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Tomasz Zawadzki 2020-01-23 04:08:03 -05:00
parent 95b478cc70
commit 2bccb7c9b4
2 changed files with 24 additions and 18 deletions

View File

@ -137,7 +137,7 @@ static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
{
uint32_t *extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
uint32_t *extent_page;
pthread_mutex_lock(&blob->bs->used_clusters_mutex);
*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
@ -148,7 +148,9 @@ _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
return -ENOSPC;
}
if (extent_page != NULL && *extent_page == 0) {
if (blob->use_extent_table) {
extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
if (*extent_page == 0) {
/* No extent_page is allocated for the cluster */
*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
*lowest_free_md_page);
@ -159,6 +161,7 @@ _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
}
_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
}
}
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
@ -167,7 +170,7 @@ _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
if (update_map) {
_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
if (extent_page != NULL && *extent_page == 0) {
if (blob->use_extent_table && *extent_page == 0) {
*extent_page = *lowest_free_md_page;
}
}
@ -6345,7 +6348,7 @@ static void
_spdk_blob_insert_cluster_msg(void *arg)
{
struct spdk_blob_insert_cluster_ctx *ctx = arg;
uint32_t *extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
uint32_t *extent_page;
ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
if (ctx->rc != 0) {
@ -6353,11 +6356,15 @@ _spdk_blob_insert_cluster_msg(void *arg)
return;
}
if (extent_page == NULL) {
/* Extent page are not used, proceed with sync of md that will contain Extents RLE */
if (ctx->blob->use_extent_table == false) {
/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
} else if (*extent_page == 0) {
return;
}
extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
if (*extent_page == 0) {
/* Extent page requires allocation.
* It was already claimed in the used_md_pages map and placed in ctx.
* Blob persist will take care of writing out new extent page on disk. */

View File

@ -543,9 +543,8 @@ _spdk_bs_cluster_to_extent_page(struct spdk_blob *blob, uint64_t cluster_num)
{
uint64_t extent_table_id = _spdk_bs_cluster_to_extent_table_id(cluster_num);
if (extent_table_id >= blob->active.extent_pages_array_size) {
return NULL;
}
assert(blob->use_extent_table);
assert(extent_table_id < blob->active.extent_pages_array_size);
return &blob->active.extent_pages[extent_table_id];
}