blobstore: rename used_clusters_mutex to used_lock
The bs->used_clusters_mutex protects used_md_pages, used_clusters, and num_free_clusters. A more generic name is appropriate. The next patch in this series will convert it from a mutex to a spinlock and having "mutex" or "spin" in the name is of little help to maintainers, so a more generic name is used. Signed-off-by: Mike Gerdts <mgerdts@nvidia.com> Change-Id: I5ce7b85b84fdec2a0c5d2ac959e0109e1d80c7f5 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15981 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com> Community-CI: Mellanox Build Bot
This commit is contained in:
parent
58549382d0
commit
2a608d0241
@ -1731,7 +1731,7 @@ blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
return;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&bs->used_clusters_mutex);
|
||||
pthread_mutex_lock(&bs->used_lock);
|
||||
/* Release all clusters that were truncated */
|
||||
for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
|
||||
uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);
|
||||
@ -1741,7 +1741,7 @@ blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
bs_release_cluster(bs, cluster_num);
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&bs->used_clusters_mutex);
|
||||
pthread_mutex_unlock(&bs->used_lock);
|
||||
|
||||
if (blob->active.num_clusters == 0) {
|
||||
free(blob->active.clusters);
|
||||
@ -2044,12 +2044,12 @@ blob_resize(struct spdk_blob *blob, uint64_t sz)
|
||||
if (spdk_blob_is_thin_provisioned(blob) == false) {
|
||||
cluster = 0;
|
||||
lfmd = 0;
|
||||
pthread_mutex_lock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_lock(&blob->bs->used_lock);
|
||||
for (i = num_clusters; i < sz; i++) {
|
||||
bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
|
||||
lfmd++;
|
||||
}
|
||||
pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_unlock(&blob->bs->used_lock);
|
||||
}
|
||||
|
||||
blob->active.num_clusters = sz;
|
||||
@ -2342,9 +2342,9 @@ blob_insert_cluster_cpl(void *cb_arg, int bserrno)
|
||||
* but continue without error. */
|
||||
bserrno = 0;
|
||||
}
|
||||
pthread_mutex_lock(&ctx->blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_lock(&ctx->blob->bs->used_lock);
|
||||
bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
|
||||
pthread_mutex_unlock(&ctx->blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_unlock(&ctx->blob->bs->used_lock);
|
||||
if (ctx->new_extent_page != 0) {
|
||||
bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
|
||||
}
|
||||
@ -2472,10 +2472,10 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
|
||||
}
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_lock(&blob->bs->used_lock);
|
||||
rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
|
||||
false);
|
||||
pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_unlock(&blob->bs->used_lock);
|
||||
if (rc != 0) {
|
||||
spdk_free(ctx->buf);
|
||||
free(ctx);
|
||||
@ -2489,9 +2489,9 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
|
||||
|
||||
ctx->seq = bs_sequence_start(_ch, &cpl);
|
||||
if (!ctx->seq) {
|
||||
pthread_mutex_lock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_lock(&blob->bs->used_lock);
|
||||
bs_release_cluster(blob->bs, ctx->new_cluster);
|
||||
pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
|
||||
pthread_mutex_unlock(&blob->bs->used_lock);
|
||||
spdk_free(ctx->buf);
|
||||
free(ctx);
|
||||
bs_user_op_abort(op, -ENOMEM);
|
||||
@ -3196,7 +3196,7 @@ bs_dev_destroy(void *io_device)
|
||||
blob_free(blob);
|
||||
}
|
||||
|
||||
pthread_mutex_destroy(&bs->used_clusters_mutex);
|
||||
pthread_mutex_destroy(&bs->used_lock);
|
||||
|
||||
spdk_bit_array_free(&bs->open_blobids);
|
||||
spdk_bit_array_free(&bs->used_blobids);
|
||||
@ -3477,14 +3477,14 @@ bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_st
|
||||
bs->used_blobids = spdk_bit_array_create(0);
|
||||
bs->open_blobids = spdk_bit_array_create(0);
|
||||
|
||||
pthread_mutex_init(&bs->used_clusters_mutex, NULL);
|
||||
pthread_mutex_init(&bs->used_lock, NULL);
|
||||
|
||||
spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
|
||||
sizeof(struct spdk_bs_channel), "blobstore");
|
||||
rc = bs_register_md_thread(bs);
|
||||
if (rc == -1) {
|
||||
spdk_io_device_unregister(bs, NULL);
|
||||
pthread_mutex_destroy(&bs->used_clusters_mutex);
|
||||
pthread_mutex_destroy(&bs->used_lock);
|
||||
spdk_bit_array_free(&bs->open_blobids);
|
||||
spdk_bit_array_free(&bs->used_blobids);
|
||||
spdk_bit_array_free(&bs->used_md_pages);
|
||||
|
@ -160,17 +160,17 @@ struct spdk_blob_store {
|
||||
|
||||
struct spdk_bs_dev *dev;
|
||||
|
||||
struct spdk_bit_array *used_md_pages;
|
||||
struct spdk_bit_pool *used_clusters;
|
||||
struct spdk_bit_array *used_md_pages; /* Protected by used_lock */
|
||||
struct spdk_bit_pool *used_clusters; /* Protected by used_lock */
|
||||
struct spdk_bit_array *used_blobids;
|
||||
struct spdk_bit_array *open_blobids;
|
||||
|
||||
pthread_mutex_t used_clusters_mutex;
|
||||
pthread_mutex_t used_lock;
|
||||
|
||||
uint32_t cluster_sz;
|
||||
uint64_t total_clusters;
|
||||
uint64_t total_data_clusters;
|
||||
uint64_t num_free_clusters;
|
||||
uint64_t num_free_clusters; /* Protected by used_lock */
|
||||
uint64_t pages_per_cluster;
|
||||
uint8_t pages_per_cluster_shift;
|
||||
uint32_t io_unit_size;
|
||||
|
Loading…
Reference in New Issue
Block a user