blob: add pages_per_cluster_shift
Locating the right LBA in the cluster map is done on the I/O path. Instead of a division and a multiplication, perform a bit shift. The bit shift is only used when the number of pages per cluster is a power of 2.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: Ic3ed7ec0a82867a8a4bc6391785b9d40c800aacb
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1724
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent b31b4229a7
commit b3348624e7
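The change relies on a standard power-of-two identity: when pages_per_cluster is a power of two, x / pages_per_cluster equals x >> log2(pages_per_cluster) and x * pages_per_cluster equals x << log2(pages_per_cluster), so the per-I/O divisions in the cluster-map lookup can become shifts. Below is a minimal standalone sketch of the idea (illustrative only, not SPDK code; is_pow2() and u32log2() stand in for the spdk_u32_is_pow2()/spdk_u32log2() helpers the patch uses):

/* Illustrative only, not SPDK code: derive the shift the same way the patch
 * does, and show the divide -> right-shift and multiply -> left-shift identity. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int is_pow2(uint32_t x)              /* stand-in for spdk_u32_is_pow2() */
{
	return x != 0 && (x & (x - 1)) == 0;
}

static uint8_t u32log2(uint32_t x)          /* stand-in for spdk_u32log2() */
{
	uint8_t shift = 0;

	while (x >>= 1) {
		shift++;
	}
	return shift;
}

int main(void)
{
	uint32_t pages_per_cluster = 256;   /* e.g. 1 MiB cluster / 4 KiB page */
	uint64_t io_units_per_page = 8;     /* e.g. 4 KiB page / 512 B io unit */
	uint64_t page = 123456;
	uint8_t shift = 0;

	if (is_pow2(pages_per_cluster)) {
		shift = u32log2(pages_per_cluster);     /* 256 -> 8 */
	}

	/* Division and multiplication agree with their shift counterparts. */
	assert(page / pages_per_cluster == page >> shift);
	assert(io_units_per_page * pages_per_cluster == io_units_per_page << shift);

	printf("pages_per_cluster=%u shift=%u cluster=%llu\n", pages_per_cluster,
	       (unsigned)shift, (unsigned long long)(page >> shift));
	return 0;
}

A non-power-of-two pages_per_cluster leaves the new shift field at 0, and the lookup helpers fall back to the division path; pages_per_cluster == 1 also yields a shift of 0 and simply stays on the (trivial) division path.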
@@ -3111,6 +3111,9 @@ _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_b
 	bs->cluster_sz = opts->cluster_sz;
 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
+	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
+		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
+	}
 	bs->num_free_clusters = bs->total_clusters;
 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
 	bs->io_unit_size = dev->blocklen;
@@ -4136,6 +4139,9 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	ctx->bs->cluster_sz = ctx->super->cluster_size;
 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
+	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
+		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
+	}
 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
 	if (rc < 0) {
@@ -196,6 +196,7 @@ struct spdk_blob_store
 	uint64_t	total_data_clusters;
 	uint64_t	num_free_clusters;
 	uint64_t	pages_per_cluster;
+	uint8_t		pages_per_cluster_shift;
 	uint32_t	io_unit_size;
 
 	spdk_blob_id	super_blob;
@@ -584,6 +585,7 @@ _spdk_bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
 {
 	uint64_t lba;
 	uint64_t pages_per_cluster;
+	uint8_t shift;
 	uint64_t io_units_per_cluster;
 	uint64_t io_units_per_page;
 	uint64_t page;
@@ -591,12 +593,18 @@ _spdk_bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
 	page = _spdk_bs_io_unit_to_page(blob->bs, io_unit);
 
 	pages_per_cluster = blob->bs->pages_per_cluster;
+	shift = blob->bs->pages_per_cluster_shift;
 	io_units_per_page = _spdk_bs_io_unit_per_page(blob->bs);
-	io_units_per_cluster = io_units_per_page * pages_per_cluster;
 
 	assert(page < blob->active.num_clusters * pages_per_cluster);
 
-	lba = blob->active.clusters[page / pages_per_cluster];
+	if (shift != 0) {
+		io_units_per_cluster = io_units_per_page << shift;
+		lba = blob->active.clusters[page >> shift];
+	} else {
+		io_units_per_cluster = io_units_per_page * pages_per_cluster;
+		lba = blob->active.clusters[page / pages_per_cluster];
+	}
 	lba += io_unit % io_units_per_cluster;
 	return lba;
 }
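As a sanity check on the lookup above: whenever the shift is non-zero, the shift path and the division fallback must select the same cluster slot for every io unit. A small self-contained check of that invariant (hypothetical standalone names, not the SPDK helpers):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the blobstore fields that
 * _spdk_bs_blob_io_unit_to_lba() reads; illustrative only. */
struct bs_geometry {
	uint64_t io_units_per_page;
	uint64_t pages_per_cluster;
	uint8_t  pages_per_cluster_shift;   /* 0 when not a power of two */
};

/* Mirror of the fast-path/fallback branch used by the lookup helpers. */
static uint64_t
cluster_index(const struct bs_geometry *bs, uint64_t page)
{
	if (bs->pages_per_cluster_shift != 0) {
		return page >> bs->pages_per_cluster_shift;
	}
	return page / bs->pages_per_cluster;
}

int main(void)
{
	struct bs_geometry bs = {
		.io_units_per_page = 8,     /* 4 KiB page, 512 B io units */
		.pages_per_cluster = 256,   /* 1 MiB cluster */
		.pages_per_cluster_shift = 8,
	};
	uint64_t io_unit;

	for (io_unit = 0; io_unit < (1u << 20); io_unit++) {
		uint64_t page = io_unit / bs.io_units_per_page;

		/* Both formulations must pick the same cluster slot. */
		assert(cluster_index(&bs, page) == page / bs.pages_per_cluster);
	}
	return 0;
}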
@@ -608,8 +616,13 @@ static inline uint32_t
 _spdk_bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
 {
 	uint64_t io_units_per_cluster;
+	uint8_t shift = blob->bs->pages_per_cluster_shift;
 
-	io_units_per_cluster = _spdk_bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
+	if (shift != 0) {
+		io_units_per_cluster = _spdk_bs_io_unit_per_page(blob->bs) << shift;
+	} else {
+		io_units_per_cluster = _spdk_bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
+	}
 
 	return io_units_per_cluster - (io_unit % io_units_per_cluster);
 }
@@ -644,7 +657,16 @@ _spdk_bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
 static inline uint32_t
 _spdk_bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
 {
-	return (io_unit / _spdk_bs_io_unit_per_page(blob->bs)) / blob->bs->pages_per_cluster;
+	uint64_t pages_per_cluster = blob->bs->pages_per_cluster;
+	uint8_t shift = blob->bs->pages_per_cluster_shift;
+	uint32_t page_offset;
+
+	page_offset = io_unit / _spdk_bs_io_unit_per_page(blob->bs);
+	if (shift != 0) {
+		return page_offset >> shift;
+	} else {
+		return page_offset / pages_per_cluster;
+	}
 }
 
 /* Given an io unit offset into a blob, look up if it is from allocated cluster. */
@@ -654,13 +676,19 @@ _spdk_bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
 	uint64_t lba;
 	uint64_t page;
 	uint64_t pages_per_cluster;
+	uint8_t shift;
 
+	shift = blob->bs->pages_per_cluster_shift;
 	pages_per_cluster = blob->bs->pages_per_cluster;
 	page = _spdk_bs_io_unit_to_page(blob->bs, io_unit);
 
 	assert(page < blob->active.num_clusters * pages_per_cluster);
 
-	lba = blob->active.clusters[page / pages_per_cluster];
+	if (shift != 0) {
+		lba = blob->active.clusters[page >> shift];
+	} else {
+		lba = blob->active.clusters[page / pages_per_cluster];
+	}
 
 	if (lba == 0) {
 		assert(spdk_blob_is_thin_provisioned(blob));