lib/ftl: Change "lbk" name to "block"

"lbk" name is more OCSSD specific so in
generic FTL "block" name is more suitable.

Change-Id: I792780297b792bf5e02f13cc20346da56b032918
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/472284
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Wojciech Malikowski 2019-10-24 10:36:42 -04:00 committed by Tomasz Zawadzki
parent 777ce6d83c
commit 39965ab048
18 changed files with 352 additions and 352 deletions

View File

@@ -81,7 +81,7 @@ struct spdk_ftl_conf {
/* IO pool size per user thread */
size_t user_io_pool_size;
/* Lowest percentage of invalid lbks for a band to be defragged */
/* Lowest percentage of invalid blocks for a band to be defragged */
size_t invalid_thld;
/* User writes limits */
@@ -134,9 +134,9 @@ struct spdk_ftl_attrs {
/* Device's UUID */
struct spdk_uuid uuid;
/* Number of logical blocks */
uint64_t lbk_cnt;
uint64_t num_blocks;
/* Logical block size */
size_t lbk_size;
size_t block_size;
/* Write buffer cache */
struct spdk_bdev_desc *cache_bdev_desc;
/* Number of zones per parallel unit in the underlying device (including any offline ones) */

View File

@@ -66,8 +66,8 @@ struct __attribute__((packed)) ftl_md_hdr {
struct __attribute__((packed)) ftl_tail_md {
struct ftl_md_hdr hdr;
/* Max number of lbks */
uint64_t num_lbks;
/* Max number of blocks */
uint64_t num_blocks;
uint8_t reserved[4059];
};
@@ -87,43 +87,43 @@ struct __attribute__((packed)) ftl_head_md {
};
size_t
ftl_tail_md_hdr_num_lbks(void)
ftl_tail_md_hdr_num_blocks(void)
{
return spdk_divide_round_up(sizeof(struct ftl_tail_md), FTL_BLOCK_SIZE);
}
size_t
ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev)
ftl_vld_map_num_blocks(const struct spdk_ftl_dev *dev)
{
return spdk_divide_round_up(ftl_vld_map_size(dev), FTL_BLOCK_SIZE);
}
size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev)
{
return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}
size_t
ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev)
ftl_head_md_num_blocks(const struct spdk_ftl_dev *dev)
{
return dev->xfer_size;
}
size_t
ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev)
{
return spdk_divide_round_up(ftl_tail_md_hdr_num_lbks() +
ftl_vld_map_num_lbks(dev) +
ftl_lba_map_num_lbks(dev),
return spdk_divide_round_up(ftl_tail_md_hdr_num_blocks() +
ftl_vld_map_num_blocks(dev) +
ftl_lba_map_num_blocks(dev),
dev->xfer_size) * dev->xfer_size;
}
static uint64_t
ftl_band_tail_md_offset(const struct ftl_band *band)
{
return ftl_band_num_usable_lbks(band) -
ftl_tail_md_num_lbks(band->dev);
return ftl_band_num_usable_blocks(band) -
ftl_tail_md_num_blocks(band->dev);
}
int
@@ -284,17 +284,17 @@ ftl_pack_tail_md(struct ftl_band *band)
struct ftl_tail_md *tail = lba_map->dma_buf;
void *vld_offset;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_blocks() * FTL_BLOCK_SIZE;
/* Clear out the buffer */
memset(tail, 0, ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE);
tail->num_lbks = ftl_get_num_blocks_in_band(dev);
memset(tail, 0, ftl_tail_md_hdr_num_blocks() * FTL_BLOCK_SIZE);
tail->num_blocks = ftl_get_num_blocks_in_band(dev);
pthread_spin_lock(&lba_map->lock);
spdk_bit_array_store_mask(lba_map->vld, vld_offset);
pthread_spin_unlock(&lba_map->lock);
ftl_set_md_hdr(band, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
ftl_set_md_hdr(band, &tail->hdr, ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE);
return FTL_MD_SUCCESS;
}
@@ -326,9 +326,9 @@ ftl_unpack_tail_md(struct ftl_band *band)
struct ftl_tail_md *tail = lba_map->dma_buf;
int rc;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_blocks() * FTL_BLOCK_SIZE;
rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE);
if (rc) {
return rc;
}
@@ -343,7 +343,7 @@ ftl_unpack_tail_md(struct ftl_band *band)
return FTL_MD_NO_MD;
}
if (tail->num_lbks != ftl_get_num_blocks_in_band(dev)) {
if (tail->num_blocks != ftl_get_num_blocks_in_band(dev)) {
return FTL_MD_INVALID_SIZE;
}
@@ -456,7 +456,7 @@ ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_addr addr)
assert(lba != FTL_LBA_INVALID);
offset = ftl_band_lbkoff_from_addr(band, addr);
offset = ftl_band_block_offset_from_addr(band, addr);
pthread_spin_lock(&lba_map->lock);
lba_map->num_vld++;
@@ -473,18 +473,18 @@ ftl_band_age(const struct ftl_band *band)
}
size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
ftl_band_num_usable_blocks(const struct ftl_band *band)
{
return band->num_zones * ftl_get_num_blocks_in_zone(band->dev);
}
size_t
ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset)
ftl_band_user_blocks_left(const struct ftl_band *band, size_t offset)
{
size_t tail_md_offset = ftl_band_tail_md_offset(band);
if (spdk_unlikely(offset <= ftl_head_md_num_lbks(band->dev))) {
return ftl_band_user_lbks(band);
if (spdk_unlikely(offset <= ftl_head_md_num_blocks(band->dev))) {
return ftl_band_user_blocks(band);
}
if (spdk_unlikely(offset > tail_md_offset)) {
@@ -495,11 +495,11 @@ ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset)
}
size_t
ftl_band_user_lbks(const struct ftl_band *band)
ftl_band_user_blocks(const struct ftl_band *band)
{
return ftl_band_num_usable_lbks(band) -
ftl_head_md_num_lbks(band->dev) -
ftl_tail_md_num_lbks(band->dev);
return ftl_band_num_usable_blocks(band) -
ftl_head_md_num_blocks(band->dev) -
ftl_tail_md_num_blocks(band->dev);
}
struct ftl_band *
@@ -521,7 +521,7 @@ ftl_band_zone_from_addr(struct ftl_band *band, struct ftl_addr addr)
}
uint64_t
ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr)
ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
{
assert(ftl_addr_get_band(band->dev, addr) == band->id);
assert(ftl_addr_get_punit(band->dev, addr) < ftl_get_num_punits(band->dev));
@@ -529,7 +529,7 @@ ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr)
}
struct ftl_addr
ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_lbks)
ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_blocks)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_zone *zone;
@@ -541,8 +541,8 @@ ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_
offset = ftl_addr_get_zone_offset(dev, addr);
zone = ftl_band_zone_from_addr(band, addr);
num_lbks += (offset % dev->xfer_size);
offset -= (offset % dev->xfer_size);
num_blocks += (offset % dev->xfer_size);
offset -= (offset % dev->xfer_size);
#if defined(DEBUG)
/* Check that the number of zones has not been changed */
@@ -556,17 +556,17 @@ ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_
assert(band->num_zones == _num_zones);
#endif
assert(band->num_zones != 0);
num_stripes = (num_lbks / dev->xfer_size) / band->num_zones;
num_stripes = (num_blocks / dev->xfer_size) / band->num_zones;
offset += num_stripes * dev->xfer_size;
num_lbks -= num_stripes * dev->xfer_size * band->num_zones;
num_blocks -= num_stripes * dev->xfer_size * band->num_zones;
if (offset > ftl_get_num_blocks_in_zone(dev)) {
return ftl_to_addr(FTL_ADDR_INVALID);
}
num_xfers = num_lbks / dev->xfer_size;
num_xfers = num_blocks / dev->xfer_size;
for (size_t i = 0; i < num_xfers; ++i) {
/* When the last zone is reached the lbk part of the address */
/* When the last zone is reached the block part of the address */
/* needs to be increased by xfer_size */
if (ftl_band_zone_is_last(band, zone)) {
offset += dev->xfer_size;
@@ -578,11 +578,11 @@ ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_
zone = ftl_band_next_operational_zone(band, zone);
assert(zone);
num_lbks -= dev->xfer_size;
num_blocks -= dev->xfer_size;
}
if (num_lbks) {
offset += num_lbks;
if (num_blocks) {
offset += num_blocks;
if (offset > ftl_get_num_blocks_in_zone(dev)) {
return ftl_to_addr(FTL_ADDR_INVALID);
}
@@ -617,19 +617,19 @@ ftl_xfer_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
}
struct ftl_addr
ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
{
struct ftl_addr addr = { .offset = 0 };
addr.offset = lbkoff + band->id * ftl_get_num_blocks_in_band(band->dev);
addr.offset = block_off + band->id * ftl_get_num_blocks_in_band(band->dev);
return addr;
}
struct ftl_addr
ftl_band_next_addr(struct ftl_band *band, struct ftl_addr addr, size_t offset)
{
uint64_t lbkoff = ftl_band_lbkoff_from_addr(band, addr);
return ftl_band_addr_from_lbkoff(band, lbkoff + offset);
uint64_t block_off = ftl_band_block_offset_from_addr(band, addr);
return ftl_band_addr_from_block_offset(band, block_off + offset);
}
void
@@ -657,9 +657,9 @@ ftl_band_alloc_lba_map(struct ftl_band *band)
memset(lba_map->dma_buf, 0, ftl_lba_map_pool_elem_size(band->dev));
lba_map->map = (uint64_t *)((char *)lba_map->dma_buf + FTL_BLOCK_SIZE *
(ftl_tail_md_hdr_num_lbks() + ftl_vld_map_num_lbks(dev)));
(ftl_tail_md_hdr_num_blocks() + ftl_vld_map_num_blocks(dev)));
lba_map->segments = (char *)lba_map->dma_buf + ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE;
lba_map->segments = (char *)lba_map->dma_buf + ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE;
ftl_band_acquire_lba_map(band);
return 0;
@@ -695,7 +695,7 @@ ftl_read_md_cb(struct ftl_io *io, void *arg, int status)
static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_addr addr,
struct ftl_band *band, size_t lbk_cnt, void *buf,
struct ftl_band *band, size_t num_blocks, void *buf,
ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
{
struct ftl_md_io *io;
@@ -707,7 +707,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_addr addr,
.size = sizeof(*io),
.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
.type = FTL_IO_READ,
.lbk_cnt = lbk_cnt,
.num_blocks = num_blocks,
.cb_fn = fn,
.data = buf,
};
@@ -727,7 +727,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_addr addr,
static struct ftl_io *
ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
void *data, size_t lbk_cnt, ftl_io_fn cb)
void *data, size_t num_blocks, ftl_io_fn cb)
{
struct ftl_io_init_opts opts = {
.dev = dev,
@@ -737,7 +737,7 @@ ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
.size = sizeof(struct ftl_io),
.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
.type = FTL_IO_WRITE,
.lbk_cnt = lbk_cnt,
.num_blocks = num_blocks,
.cb_fn = cb,
.data = data,
.md = NULL,
@@ -747,13 +747,13 @@ ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
}
static int
ftl_band_write_md(struct ftl_band *band, size_t lbk_cnt,
ftl_band_write_md(struct ftl_band *band, size_t num_blocks,
ftl_md_pack_fn md_fn, ftl_io_fn cb)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_io *io;
io = ftl_io_init_md_write(dev, band, band->lba_map.dma_buf, lbk_cnt, cb);
io = ftl_io_init_md_write(dev, band, band->lba_map.dma_buf, num_blocks, cb);
if (!io) {
return -ENOMEM;
}
@@ -776,14 +776,14 @@ ftl_band_md_clear(struct ftl_band *band)
int
ftl_band_write_head_md(struct ftl_band *band, ftl_io_fn cb)
{
return ftl_band_write_md(band, ftl_head_md_num_lbks(band->dev),
return ftl_band_write_md(band, ftl_head_md_num_blocks(band->dev),
ftl_pack_head_md, cb);
}
int
ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb)
{
return ftl_band_write_md(band, ftl_tail_md_num_lbks(band->dev),
return ftl_band_write_md(band, ftl_tail_md_num_blocks(band->dev),
ftl_pack_tail_md, cb);
}
@@ -791,13 +791,13 @@ static struct ftl_addr
ftl_band_lba_map_addr(struct ftl_band *band, size_t offset)
{
return ftl_band_next_xfer_addr(band, band->tail_md_addr,
ftl_tail_md_hdr_num_lbks() +
ftl_vld_map_num_lbks(band->dev) +
ftl_tail_md_hdr_num_blocks() +
ftl_vld_map_num_blocks(band->dev) +
offset);
}
static int
ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_addr start_addr,
ftl_band_read_md(struct ftl_band *band, size_t num_blocks, struct ftl_addr start_addr,
void *buf, ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
{
struct spdk_ftl_dev *dev = band->dev;
@@ -807,7 +807,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_addr start_ad
return -ENOENT;
}
io = ftl_io_init_md_read(dev, start_addr, band, lbk_cnt, buf, fn, pack_fn, cb_fn, cb_ctx);
io = ftl_io_init_md_read(dev, start_addr, band, num_blocks, buf, fn, pack_fn, cb_fn, cb_ctx);
if (!io) {
return -ENOMEM;
}
@@ -819,7 +819,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_addr start_ad
int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_addr addr, ftl_io_fn cb_fn, void *cb_ctx)
{
return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), addr, band->lba_map.dma_buf,
return ftl_band_read_md(band, ftl_tail_md_num_blocks(band->dev), addr, band->lba_map.dma_buf,
ftl_read_md_cb, ftl_unpack_tail_md, cb_fn, cb_ctx);
}
@@ -884,7 +884,7 @@ ftl_lba_map_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
struct ftl_addr start_addr = ftl_band_lba_map_addr(band, 0);
offset = ftl_xfer_offset_from_addr(band, addr) - ftl_xfer_offset_from_addr(band, start_addr);
assert(offset < ftl_lba_map_num_lbks(band->dev));
assert(offset < ftl_lba_map_num_blocks(band->dev));
return offset;
}
@@ -893,17 +893,17 @@ static void
ftl_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
{
struct ftl_lba_map *lba_map = &io->band->lba_map;
uint64_t lbk_off;
uint64_t block_off;
lbk_off = ftl_lba_map_offset_from_addr(io->band, io->addr);
assert(lbk_off + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));
block_off = ftl_lba_map_offset_from_addr(io->band, io->addr);
assert(block_off + io->num_blocks <= ftl_lba_map_num_blocks(io->dev));
if (!status) {
ftl_lba_map_set_segment_state(lba_map, lbk_off, io->lbk_cnt,
ftl_lba_map_set_segment_state(lba_map, block_off, io->num_blocks,
FTL_LBA_MAP_SEG_CACHED);
}
ftl_process_lba_map_requests(io->dev, lba_map, lbk_off, io->lbk_cnt, status);
ftl_process_lba_map_requests(io->dev, lba_map, block_off, io->num_blocks, status);
}
static struct ftl_lba_map_request *
@@ -950,46 +950,46 @@ int
ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
ftl_io_fn cb_fn, void *cb_ctx)
{
size_t lbk_cnt, lbk_off, num_read, num_segments;
size_t num_blocks, block_off, num_read, num_segments;
struct ftl_lba_map *lba_map = &band->lba_map;
struct ftl_lba_map_request *request;
int rc = 0;
lbk_off = offset / FTL_NUM_LBA_IN_BLOCK;
block_off = offset / FTL_NUM_LBA_IN_BLOCK;
num_segments = spdk_divide_round_up(offset + lba_cnt, FTL_NUM_LBA_IN_BLOCK);
lbk_cnt = num_segments - lbk_off;
assert(lbk_off + lbk_cnt <= ftl_lba_map_num_lbks(band->dev));
num_blocks = num_segments - block_off;
assert(block_off + num_blocks <= ftl_lba_map_num_blocks(band->dev));
request = ftl_lba_map_alloc_request(band, lbk_off, lbk_cnt, cb_fn, cb_ctx);
request = ftl_lba_map_alloc_request(band, block_off, num_blocks, cb_fn, cb_ctx);
if (!request) {
return -ENOMEM;
}
while (lbk_cnt) {
if (lba_map->segments[lbk_off] != FTL_LBA_MAP_SEG_CLEAR) {
if (lba_map->segments[lbk_off] == FTL_LBA_MAP_SEG_CACHED) {
ftl_lba_map_request_segment_done(request, lbk_off, 1);
while (num_blocks) {
if (lba_map->segments[block_off] != FTL_LBA_MAP_SEG_CLEAR) {
if (lba_map->segments[block_off] == FTL_LBA_MAP_SEG_CACHED) {
ftl_lba_map_request_segment_done(request, block_off, 1);
}
lbk_cnt--;
lbk_off++;
num_blocks--;
block_off++;
continue;
}
num_read = ftl_lba_map_num_clear_segments(lba_map, lbk_off, lbk_cnt);
ftl_lba_map_set_segment_state(lba_map, lbk_off, num_read,
num_read = ftl_lba_map_num_clear_segments(lba_map, block_off, num_blocks);
ftl_lba_map_set_segment_state(lba_map, block_off, num_read,
FTL_LBA_MAP_SEG_PENDING);
rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_addr(band, lbk_off),
(char *)band->lba_map.map + lbk_off * FTL_BLOCK_SIZE,
rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_addr(band, block_off),
(char *)band->lba_map.map + block_off * FTL_BLOCK_SIZE,
ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
if (rc) {
ftl_lba_map_request_free(band->dev, request);
return rc;
}
assert(lbk_cnt >= num_read);
lbk_cnt -= num_read;
lbk_off += num_read;
assert(num_blocks >= num_read);
num_blocks -= num_read;
block_off += num_read;
}
if (request->num_pending) {
@@ -1006,7 +1006,7 @@ int
ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
{
return ftl_band_read_md(band,
ftl_head_md_num_lbks(band->dev),
ftl_head_md_num_blocks(band->dev),
ftl_band_head_md_addr(band),
band->lba_map.dma_buf,
ftl_read_md_cb,
@@ -1137,7 +1137,7 @@ ftl_band_clear_lba_map(struct ftl_band *band)
size_t num_segments;
spdk_bit_array_clear_mask(lba_map->vld);
memset(lba_map->map, 0, ftl_lba_map_num_lbks(band->dev) * FTL_BLOCK_SIZE);
memset(lba_map->map, 0, ftl_lba_map_num_blocks(band->dev) * FTL_BLOCK_SIZE);
/* For open band all lba map segments are already cached */
assert(band->state == FTL_BAND_STATE_PREP);
@@ -1151,6 +1151,6 @@ size_t
ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev)
{
/* Map pool element holds the whole tail md + segments map */
return ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE +
return ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE +
spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), FTL_NUM_LBA_IN_BLOCK);
}

View File

@@ -182,8 +182,8 @@ struct ftl_band {
STAILQ_ENTRY(ftl_band) prio_stailq;
};
uint64_t ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr);
struct ftl_addr ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff);
uint64_t ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr);
struct ftl_addr ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off);
void ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state);
size_t ftl_band_age(const struct ftl_band *band);
void ftl_band_acquire_lba_map(struct ftl_band *band);
@@ -194,12 +194,12 @@ int ftl_band_read_lba_map(struct ftl_band *band,
size_t offset, size_t lba_cnt,
ftl_io_fn cb_fn, void *cb_ctx);
struct ftl_addr ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr,
size_t num_lbks);
size_t num_blocks);
struct ftl_addr ftl_band_next_addr(struct ftl_band *band, struct ftl_addr addr,
size_t offset);
size_t ftl_band_num_usable_lbks(const struct ftl_band *band);
size_t ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset);
size_t ftl_band_user_lbks(const struct ftl_band *band);
size_t ftl_band_num_usable_blocks(const struct ftl_band *band);
size_t ftl_band_user_blocks_left(const struct ftl_band *band, size_t offset);
size_t ftl_band_user_blocks(const struct ftl_band *band);
void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
struct ftl_addr addr);
struct ftl_band *ftl_band_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
@@ -247,12 +247,12 @@ ftl_band_state_changing(struct ftl_band *band)
}
static inline int
ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
ftl_band_block_offset_valid(struct ftl_band *band, size_t block_off)
{
struct ftl_lba_map *lba_map = &band->lba_map;
pthread_spin_lock(&lba_map->lock);
if (spdk_bit_array_get(lba_map->vld, lbkoff)) {
if (spdk_bit_array_get(lba_map->vld, block_off)) {
pthread_spin_unlock(&lba_map->lock);
return 1;
}

View File

@@ -259,7 +259,7 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
if (band->state == FTL_BAND_STATE_CLOSED) {
if (ftl_dev_has_nv_cache(dev)) {
pthread_spin_lock(&nv_cache->lock);
nv_cache->num_available += ftl_band_user_lbks(band);
nv_cache->num_available += ftl_band_user_blocks(band);
if (spdk_unlikely(nv_cache->num_available > nv_cache->num_data_blocks)) {
nv_cache->num_available = nv_cache->num_data_blocks;
@@ -289,7 +289,7 @@ static int
ftl_read_next_physical_addr(struct ftl_io *io, struct ftl_addr *addr)
{
struct spdk_ftl_dev *dev = io->dev;
size_t lbk_cnt, max_lbks;
size_t num_blocks, max_blocks;
assert(ftl_io_mode_physical(io));
assert(io->iov_pos < io->iov_cnt);
@@ -305,15 +305,15 @@ ftl_read_next_physical_addr(struct ftl_io *io, struct ftl_addr *addr)
/* Metadata has to be read in the way it's written (jumping across */
/* the zones in xfer_size increments) */
if (io->flags & FTL_IO_MD) {
max_lbks = dev->xfer_size - (addr->offset % dev->xfer_size);
lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);
max_blocks = dev->xfer_size - (addr->offset % dev->xfer_size);
num_blocks = spdk_min(ftl_io_iovec_len_left(io), max_blocks);
assert(addr->offset / dev->xfer_size ==
(addr->offset + lbk_cnt - 1) / dev->xfer_size);
(addr->offset + num_blocks - 1) / dev->xfer_size);
} else {
lbk_cnt = ftl_io_iovec_len_left(io);
num_blocks = ftl_io_iovec_len_left(io);
}
return lbk_cnt;
return num_blocks;
}
static int
@@ -355,7 +355,7 @@ ftl_submit_erase(struct ftl_io *io)
ioch = spdk_io_channel_get_ctx(ftl_get_io_channel(dev));
for (i = 0; i < io->lbk_cnt; ++i) {
for (i = 0; i < io->num_blocks; ++i) {
if (i != 0) {
zone = ftl_band_next_zone(band, ftl_band_zone_from_addr(band, addr));
assert(zone->info.state == SPDK_BDEV_ZONE_STATE_CLOSED);
@@ -583,7 +583,7 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
}
wptr->offset += xfer_size;
next_thld = (ftl_band_num_usable_lbks(band) * conf->band_thld) / 100;
next_thld = (ftl_band_num_usable_blocks(band) * conf->band_thld) / 100;
if (ftl_band_full(band, wptr->offset)) {
ftl_band_set_state(band, FTL_BAND_STATE_FULL);
@@ -606,9 +606,9 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
}
static size_t
ftl_wptr_user_lbks_left(const struct ftl_wptr *wptr)
ftl_wptr_user_blocks_left(const struct ftl_wptr *wptr)
{
return ftl_band_user_lbks_left(wptr->band, wptr->offset);
return ftl_band_user_blocks_left(wptr->band, wptr->offset);
}
static int
@@ -783,7 +783,7 @@ ftl_wptr_pad_band(struct ftl_wptr *wptr)
size_t size = ftl_rwb_num_pending(dev->rwb);
size_t blocks_left, rwb_size, pad_size;
blocks_left = ftl_wptr_user_lbks_left(wptr);
blocks_left = ftl_wptr_user_blocks_left(wptr);
assert(size <= blocks_left);
assert(blocks_left % dev->xfer_size == 0);
rwb_size = ftl_rwb_size(dev->rwb) - size;
@@ -860,7 +860,7 @@ ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_addr addr)
struct ftl_lba_map *lba_map = &band->lba_map;
uint64_t offset;
offset = ftl_band_lbkoff_from_addr(band, addr);
offset = ftl_band_block_offset_from_addr(band, addr);
/* The bit might be already cleared if two writes are scheduled to the */
/* same LBA at the same time */
@@ -982,17 +982,17 @@ ftl_submit_read(struct ftl_io *io)
struct spdk_ftl_dev *dev = io->dev;
struct ftl_io_channel *ioch;
struct ftl_addr addr;
int rc = 0, lbk_cnt;
int rc = 0, num_blocks;
ioch = spdk_io_channel_get_ctx(io->ioch);
assert(LIST_EMPTY(&io->children));
while (io->pos < io->lbk_cnt) {
while (io->pos < io->num_blocks) {
if (ftl_io_mode_physical(io)) {
lbk_cnt = rc = ftl_read_next_physical_addr(io, &addr);
num_blocks = rc = ftl_read_next_physical_addr(io, &addr);
} else {
lbk_cnt = rc = ftl_read_next_logical_addr(io, &addr);
num_blocks = rc = ftl_read_next_logical_addr(io, &addr);
}
/* We might need to retry the read from scratch (e.g. */
@@ -1011,13 +1011,13 @@ ftl_submit_read(struct ftl_io *io)
continue;
}
assert(lbk_cnt > 0);
assert(num_blocks > 0);
ftl_trace_submission(dev, io, addr, lbk_cnt);
ftl_trace_submission(dev, io, addr, num_blocks);
rc = spdk_bdev_read_blocks(dev->base_bdev_desc, ioch->base_ioch,
ftl_io_iovec_addr(io),
addr.offset,
lbk_cnt, ftl_io_cmpl_cb, io);
num_blocks, ftl_io_cmpl_cb, io);
if (spdk_unlikely(rc)) {
if (rc == -ENOMEM) {
ftl_add_to_retry_queue(io);
@@ -1028,7 +1028,7 @@ ftl_submit_read(struct ftl_io *io)
}
ftl_io_inc_req(io);
ftl_io_advance(io, lbk_cnt);
ftl_io_advance(io, num_blocks);
}
/* If we didn't have to read anything from the device, */
@@ -1104,7 +1104,7 @@ ftl_nv_cache_wrap(void *ctx)
}
static uint64_t
ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_lbks, unsigned int *phase)
ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_blocks, unsigned int *phase)
{
struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
@@ -1117,18 +1117,18 @@ ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_lbks, unsigned i
goto out;
}
num_available = spdk_min(nv_cache->num_available, *num_lbks);
num_available = spdk_min(nv_cache->num_available, *num_blocks);
num_available = spdk_min(num_available, dev->conf.nv_cache.max_request_cnt);
if (spdk_unlikely(nv_cache->current_addr + num_available > cache_size)) {
*num_lbks = cache_size - nv_cache->current_addr;
*num_blocks = cache_size - nv_cache->current_addr;
} else {
*num_lbks = num_available;
*num_blocks = num_available;
}
cache_addr = nv_cache->current_addr;
nv_cache->current_addr += *num_lbks;
nv_cache->num_available -= *num_lbks;
nv_cache->current_addr += *num_blocks;
nv_cache->num_available -= *num_blocks;
*phase = nv_cache->phase;
if (nv_cache->current_addr == spdk_bdev_get_num_blocks(bdev)) {
@@ -1143,13 +1143,13 @@ out:
}
static struct ftl_io *
ftl_alloc_io_nv_cache(struct ftl_io *parent, size_t num_lbks)
ftl_alloc_io_nv_cache(struct ftl_io *parent, size_t num_blocks)
{
struct ftl_io_init_opts opts = {
.dev = parent->dev,
.parent = parent,
.data = ftl_io_iovec_addr(parent),
.lbk_cnt = num_lbks,
.num_blocks = num_blocks,
.flags = parent->flags | FTL_IO_CACHE,
};
@@ -1191,20 +1191,20 @@ ftl_submit_nv_cache(void *ctx)
rc = spdk_bdev_write_blocks_with_md(nv_cache->bdev_desc, ioch->cache_ioch,
ftl_io_iovec_addr(io), io->md, io->addr.offset,
io->lbk_cnt, ftl_nv_cache_submit_cb, io);
io->num_blocks, ftl_nv_cache_submit_cb, io);
if (rc == -ENOMEM) {
spdk_thread_send_msg(thread, ftl_submit_nv_cache, io);
return;
} else if (rc) {
SPDK_ERRLOG("Write to persistent cache failed: %s (%"PRIu64", %"PRIu64")\n",
spdk_strerror(-rc), io->addr.offset, io->lbk_cnt);
spdk_strerror(-rc), io->addr.offset, io->num_blocks);
spdk_mempool_put(nv_cache->md_pool, io->md);
io->status = -EIO;
ftl_io_complete(io);
return;
}
ftl_io_advance(io, io->lbk_cnt);
ftl_io_advance(io, io->num_blocks);
ftl_io_inc_req(io);
}
@@ -1213,13 +1213,13 @@ ftl_nv_cache_fill_md(struct ftl_io *io, unsigned int phase)
{
struct spdk_bdev *bdev;
struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
uint64_t lbk_off, lba;
uint64_t block_off, lba;
void *md_buf = io->md;
bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
for (lbk_off = 0; lbk_off < io->lbk_cnt; ++lbk_off) {
lba = ftl_nv_cache_pack_lba(ftl_io_get_lba(io, lbk_off), phase);
for (block_off = 0; block_off < io->num_blocks; ++block_off) {
lba = ftl_nv_cache_pack_lba(ftl_io_get_lba(io, block_off), phase);
memcpy(md_buf, &lba, sizeof(lba));
md_buf += spdk_bdev_get_md_size(bdev);
}
@@ -1232,14 +1232,14 @@ _ftl_write_nv_cache(void *ctx)
struct spdk_ftl_dev *dev = io->dev;
struct spdk_thread *thread;
unsigned int phase;
uint64_t num_lbks;
uint64_t num_blocks;
thread = spdk_io_channel_get_thread(io->ioch);
while (io->pos < io->lbk_cnt) {
num_lbks = ftl_io_iovec_len_left(io);
while (io->pos < io->num_blocks) {
num_blocks = ftl_io_iovec_len_left(io);
child = ftl_alloc_io_nv_cache(io, num_lbks);
child = ftl_alloc_io_nv_cache(io, num_blocks);
if (spdk_unlikely(!child)) {
spdk_thread_send_msg(thread, _ftl_write_nv_cache, io);
return;
@@ -1253,7 +1253,7 @@ _ftl_write_nv_cache(void *ctx)
}
/* Reserve area on the write buffer cache */
child->addr.offset = ftl_reserve_nv_cache(&dev->nv_cache, &num_lbks, &phase);
child->addr.offset = ftl_reserve_nv_cache(&dev->nv_cache, &num_blocks, &phase);
if (child->addr.offset == FTL_LBA_INVALID) {
spdk_mempool_put(dev->nv_cache.md_pool, child->md);
ftl_io_free(child);
@@ -1262,8 +1262,8 @@ _ftl_write_nv_cache(void *ctx)
}
/* Shrink the IO if there isn't enough room in the cache to fill the whole iovec */
if (spdk_unlikely(num_lbks != ftl_io_iovec_len_left(io))) {
ftl_io_shrink_iovec(child, num_lbks);
if (spdk_unlikely(num_blocks != ftl_io_iovec_len_left(io))) {
ftl_io_shrink_iovec(child, num_blocks);
}
ftl_nv_cache_fill_md(child, phase);
@@ -1364,13 +1364,13 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
return;
}
assert(io->lbk_cnt == dev->xfer_size);
assert(io->num_blocks == dev->xfer_size);
assert(!(io->flags & FTL_IO_MD));
ftl_rwb_foreach(entry, batch) {
band = entry->band;
if (!(entry->flags & FTL_IO_PAD)) {
/* Verify that the LBA is set for user lbks */
/* Verify that the LBA is set for user blocks */
assert(entry->lba != FTL_LBA_INVALID);
}
@@ -1493,7 +1493,7 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_addr addr,
.size = sizeof(struct ftl_io),
.flags = 0,
.type = parent->type,
.lbk_cnt = dev->xfer_size,
.num_blocks = dev->xfer_size,
.cb_fn = cb,
.data = data,
.md = md,
@@ -1519,7 +1519,7 @@ ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
wptr = ftl_wptr_from_band(io->band);
zone->busy = false;
zone->info.write_pointer += io->lbk_cnt;
zone->info.write_pointer += io->num_blocks;
/* If some other write on the same band failed the write pointer would already be freed */
if (spdk_likely(wptr)) {
@@ -1528,7 +1528,7 @@ ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
}
static int
ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int num_blocks)
{
struct spdk_ftl_dev *dev = io->dev;
struct ftl_io_channel *ioch;
@@ -1559,12 +1559,12 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
rc = spdk_bdev_zone_append(dev->base_bdev_desc, ioch->base_ioch,
ftl_io_iovec_addr(child),
ftl_addr_get_zone_slba(dev, addr),
lbk_cnt, ftl_io_cmpl_cb, child);
num_blocks, ftl_io_cmpl_cb, child);
} else {
rc = spdk_bdev_write_blocks(dev->base_bdev_desc, ioch->base_ioch,
ftl_io_iovec_addr(child),
addr.offset,
lbk_cnt, ftl_io_cmpl_cb, child);
num_blocks, ftl_io_cmpl_cb, child);
}
if (rc) {
@@ -1577,7 +1577,7 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
}
ftl_io_inc_req(child);
ftl_io_advance(child, lbk_cnt);
ftl_io_advance(child, num_blocks);
return 0;
}
@@ -1588,7 +1588,7 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
struct spdk_ftl_dev *dev = io->dev;
int rc = 0;
assert(io->lbk_cnt % dev->xfer_size == 0);
assert(io->num_blocks % dev->xfer_size == 0);
/* Only one child write make sense in case of user write */
assert((io->flags & FTL_IO_MD) || io->iov_cnt == 1);
@@ -1772,7 +1772,7 @@ ftl_rwb_fill(struct ftl_io *io)
struct ftl_addr addr = { .cached = 1 };
int flags = ftl_rwb_flags_from_io(io);
while (io->pos < io->lbk_cnt) {
while (io->pos < io->num_blocks) {
if (ftl_io_current_lba(io) == FTL_LBA_INVALID) {
ftl_io_advance(io, 1);
continue;
@@ -1834,8 +1834,8 @@ ftl_band_calc_merit(struct ftl_band *band, size_t *threshold_valid)
size_t usable, valid, invalid;
double vld_ratio;
/* If the band doesn't have any usable lbks it's of no use */
usable = ftl_band_num_usable_lbks(band);
/* If the band doesn't have any usable blocks it's of no use */
usable = ftl_band_num_usable_blocks(band);
if (usable == 0) {
return 0.0;
}
@@ -1859,7 +1859,7 @@ ftl_band_needs_defrag(struct ftl_band *band, struct spdk_ftl_dev *dev)
return true;
}
thld_vld = (ftl_band_num_usable_lbks(band) * conf->invalid_thld) / 100;
thld_vld = (ftl_band_num_usable_blocks(band) * conf->invalid_thld) / 100;
return band->merit > ftl_band_calc_merit(band, &thld_vld);
}
@@ -1912,8 +1912,8 @@ void
spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attrs)
{
attrs->uuid = dev->uuid;
attrs->lbk_cnt = dev->num_lbas;
attrs->lbk_size = FTL_BLOCK_SIZE;
attrs->num_blocks = dev->num_lbas;
attrs->block_size = FTL_BLOCK_SIZE;
attrs->cache_bdev_desc = dev->nv_cache.bdev_desc;
attrs->num_zones = ftl_get_num_zones(dev);
attrs->zone_size = ftl_get_num_blocks_in_zone(dev);
@ -1988,7 +1988,7 @@ spdk_ftl_write(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lb
return -EINVAL;
}
if (lba_cnt != ftl_iovec_num_lbks(iov, iov_cnt)) {
if (lba_cnt != ftl_iovec_num_blocks(iov, iov_cnt)) {
return -EINVAL;
}
@ -2052,7 +2052,7 @@ spdk_ftl_read(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lba
return -EINVAL;
}
if (lba_cnt != ftl_iovec_num_lbks(iov, iov_cnt)) {
if (lba_cnt != ftl_iovec_num_blocks(iov, iov_cnt)) {
return -EINVAL;
}

View File

@ -259,11 +259,11 @@ int ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
int ftl_task_core(void *ctx);
int ftl_task_read(void *ctx);
void ftl_process_anm_event(struct ftl_anm_event *event);
size_t ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t ftl_tail_md_hdr_num_lbks(void);
size_t ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev);
size_t ftl_tail_md_hdr_num_blocks(void);
size_t ftl_vld_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t ftl_head_md_num_blocks(const struct spdk_ftl_dev *dev);
int ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
int ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);
void ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb);

View File

@ -72,7 +72,7 @@ ftl_band_validate_md(struct ftl_band *band)
continue;
}
addr_md = ftl_band_addr_from_lbkoff(band, i);
addr_md = ftl_band_addr_from_block_offset(band, i);
addr_l2p = ftl_l2p_get(dev, lba_map->map[i]);
if (addr_l2p.cached) {
@ -116,7 +116,7 @@ ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
ftl_debug(" Band %3zu: %8zu / %zu \tnum_zones: %zu \twr_cnt: %"PRIu64"\tmerit:"
"%10.3f\tstate: %s\n",
i + 1, dev->bands[i].lba_map.num_vld,
ftl_band_user_lbks(&dev->bands[i]),
ftl_band_user_blocks(&dev->bands[i]),
dev->bands[i].num_zones,
dev->bands[i].wr_cnt,
dev->bands[i].merit,

View File

@ -75,9 +75,9 @@ static const struct spdk_ftl_conf g_default_conf = {
/* 40 free bands / 100 % host writes - defrag starts running */
[SPDK_FTL_LIMIT_START] = { .thld = 40, .limit = 100 },
},
/* 10 percent valid lbks */
/* 10 percent valid blocks */
.invalid_thld = 10,
/* 20% spare lbks */
/* 20% spare blocks */
.lba_rsvd = 20,
/* 6M write buffer */
.rwb_size = 6 * 1024 * 1024,
@ -632,7 +632,7 @@ ftl_setup_initial_state(struct spdk_ftl_dev *dev)
dev->num_lbas = 0;
for (i = 0; i < ftl_get_num_bands(dev); ++i) {
dev->num_lbas += ftl_band_num_usable_lbks(&dev->bands[i]);
dev->num_lbas += ftl_band_num_usable_blocks(&dev->bands[i]);
}
dev->num_lbas = (dev->num_lbas * (100 - conf->lba_rsvd)) / 100;
@ -954,7 +954,7 @@ static int
ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev)
{
uint32_t block_size;
uint64_t block_cnt;
uint64_t num_blocks;
struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
if (!spdk_bdev_is_zoned(bdev)) {
@ -972,8 +972,8 @@ ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev)
return -1;
}
block_cnt = spdk_bdev_get_num_blocks(bdev);
if (block_cnt % ftl_get_num_punits(dev)) {
num_blocks = spdk_bdev_get_num_blocks(bdev);
if (num_blocks % ftl_get_num_punits(dev)) {
SPDK_ERRLOG("Unsupported geometry. Base bdev block count must be multiple "
"of optimal number of zones.\n");
return -1;
@ -986,8 +986,8 @@ ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev)
return -1;
}
dev->num_bands = block_cnt / (ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev));
dev->addr_len = spdk_u64log2(block_cnt) + 1;
dev->num_bands = num_blocks / (ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev));
dev->addr_len = spdk_u64log2(num_blocks) + 1;
return 0;
}

View File

@ -83,7 +83,7 @@ ftl_io_iovec(struct ftl_io *io)
uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
assert(offset < io->lbk_cnt);
assert(offset < io->num_blocks);
if (io->flags & FTL_IO_VECTOR_LBA) {
return io->lba.vector[offset];
@ -99,45 +99,45 @@ ftl_io_current_lba(const struct ftl_io *io)
}
void
ftl_io_advance(struct ftl_io *io, size_t lbk_cnt)
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
struct iovec *iov = ftl_io_iovec(io);
size_t iov_lbks, lbk_left = lbk_cnt;
size_t iov_blocks, block_left = num_blocks;
io->pos += lbk_cnt;
io->pos += num_blocks;
if (io->iov_cnt != 0) {
while (lbk_left > 0) {
while (block_left > 0) {
assert(io->iov_pos < io->iov_cnt);
iov_lbks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;
iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;
if (io->iov_off + lbk_left < iov_lbks) {
io->iov_off += lbk_left;
if (io->iov_off + block_left < iov_blocks) {
io->iov_off += block_left;
break;
}
assert(iov_lbks > io->iov_off);
lbk_left -= (iov_lbks - io->iov_off);
assert(iov_blocks > io->iov_off);
block_left -= (iov_blocks - io->iov_off);
io->iov_off = 0;
io->iov_pos++;
}
}
if (io->parent) {
ftl_io_advance(io->parent, lbk_cnt);
ftl_io_advance(io->parent, num_blocks);
}
}
size_t
ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt)
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
size_t lbks = 0, i = 0;
size_t num_blocks = 0, i = 0;
for (; i < iov_cnt; ++i) {
lbks += iov[i].iov_len / FTL_BLOCK_SIZE;
num_blocks += iov[i].iov_len / FTL_BLOCK_SIZE;
}
return lbks;
return num_blocks;
}
void *
@ -158,19 +158,19 @@ ftl_io_iovec_len_left(struct ftl_io *io)
}
static void
_ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t lbk_cnt)
_ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t num_blocks)
{
size_t iov_off;
io->iov_pos = 0;
io->iov_cnt = iov_cnt;
io->lbk_cnt = lbk_cnt;
io->num_blocks = num_blocks;
memcpy(io->iov, iov, iov_cnt * sizeof(*iov));
if (lbk_cnt == 0) {
if (num_blocks == 0) {
for (iov_off = 0; iov_off < iov_cnt; ++iov_off) {
io->lbk_cnt += iov[iov_off].iov_len / FTL_BLOCK_SIZE;
io->num_blocks += iov[iov_off].iov_len / FTL_BLOCK_SIZE;
}
}
}
@ -190,24 +190,24 @@ ftl_io_add_child(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt)
_ftl_io_init_iovec(child, iov, iov_cnt, 0);
if (io->flags & FTL_IO_VECTOR_LBA) {
child->lba.vector = io->lba.vector + io->lbk_cnt;
child->lba.vector = io->lba.vector + io->num_blocks;
} else {
child->lba.single = io->lba.single + io->lbk_cnt;
child->lba.single = io->lba.single + io->num_blocks;
}
io->lbk_cnt += child->lbk_cnt;
io->num_blocks += child->num_blocks;
return 0;
}
static int
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t lbk_cnt)
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t num_blocks)
{
struct ftl_io *child;
size_t iov_off = 0, iov_left;
int rc;
if (spdk_likely(iov_cnt <= FTL_IO_MAX_IOVEC)) {
_ftl_io_init_iovec(io, iov, iov_cnt, lbk_cnt);
_ftl_io_init_iovec(io, iov, iov_cnt, num_blocks);
return 0;
}
@ -228,30 +228,30 @@ ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, si
iov_off += iov_left;
}
assert(io->lbk_cnt == lbk_cnt);
assert(io->num_blocks == num_blocks);
return 0;
}
void
ftl_io_shrink_iovec(struct ftl_io *io, size_t lbk_cnt)
ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
{
size_t iov_off = 0, lbk_off = 0;
size_t iov_off = 0, block_off = 0;
assert(io->lbk_cnt >= lbk_cnt);
assert(io->num_blocks >= num_blocks);
assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);
for (; iov_off < io->iov_cnt; ++iov_off) {
size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
size_t num_left = lbk_cnt - lbk_off;
size_t num_left = num_blocks - block_off;
if (num_iov >= num_left) {
io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
io->iov_cnt = iov_off + 1;
io->lbk_cnt = lbk_cnt;
io->num_blocks = num_blocks;
break;
}
lbk_off += num_iov;
block_off += num_iov;
}
}
@ -277,7 +277,7 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
struct spdk_ftl_dev *dev = opts->dev;
struct iovec iov = {
.iov_base = opts->data,
.iov_len = opts->lbk_cnt * FTL_BLOCK_SIZE
.iov_len = opts->num_blocks * FTL_BLOCK_SIZE
};
if (!io) {
@ -307,7 +307,7 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
}
}
if (ftl_io_init_iovec(io, &iov, 1, opts->lbk_cnt)) {
if (ftl_io_init_iovec(io, &iov, 1, opts->num_blocks)) {
if (!opts->io) {
ftl_io_free(io);
}
@ -315,7 +315,7 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
}
if (opts->flags & FTL_IO_VECTOR_LBA) {
io->lba.vector = calloc(io->lbk_cnt, sizeof(uint64_t));
io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
if (!io->lba.vector) {
ftl_io_free(io);
return NULL;
@ -338,7 +338,7 @@ ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band
.size = sizeof(struct ftl_io),
.flags = 0,
.type = FTL_IO_WRITE,
.lbk_cnt = dev->xfer_size,
.num_blocks = dev->xfer_size,
.cb_fn = cb,
.data = ftl_rwb_batch_get_data(batch),
.md = ftl_rwb_batch_get_md(batch),
@ -355,7 +355,7 @@ ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band
}
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
struct ftl_io *io;
struct ftl_io_init_opts opts = {
@ -366,7 +366,7 @@ ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
.size = sizeof(struct ftl_io),
.flags = FTL_IO_PHYSICAL_MODE,
.type = FTL_IO_ERASE,
.lbk_cnt = 1,
.num_blocks = 1,
.cb_fn = cb,
.data = NULL,
.md = NULL,
@ -377,7 +377,7 @@ ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
return NULL;
}
io->lbk_cnt = lbk_cnt;
io->num_blocks = num_blocks;
return io;
}
@ -389,7 +389,7 @@ _ftl_user_cb(struct ftl_io *io, void *arg, int status)
}
struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t lbk_cnt, struct iovec *iov,
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
struct ftl_io_channel *ioch = spdk_io_channel_get_ctx(_ioch);
@ -405,7 +405,7 @@ ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t lbk_cnt, st
io->lba.single = lba;
io->user_fn = cb_fn;
if (ftl_io_init_iovec(io, iov, iov_cnt, lbk_cnt)) {
if (ftl_io_init_iovec(io, iov, iov_cnt, num_blocks)) {
ftl_io_free(io);
return NULL;
}
@ -497,7 +497,7 @@ ftl_io_alloc_child(struct ftl_io *parent)
void ftl_io_fail(struct ftl_io *io, int status)
{
io->status = status;
ftl_io_advance(io, io->lbk_cnt - io->pos);
ftl_io_advance(io, io->num_blocks - io->pos);
}
void *

View File

@ -108,7 +108,7 @@ struct ftl_io_init_opts {
struct ftl_band *band;
/* Number of logical blocks */
size_t lbk_cnt;
size_t num_blocks;
/* Data */
void *data;
@ -155,11 +155,11 @@ struct ftl_io {
/* First block address */
struct ftl_addr addr;
/* Number of processed lbks */
/* Number of processed blocks */
size_t pos;
/* Number of lbks */
size_t lbk_cnt;
/* Number of blocks */
size_t num_blocks;
#define FTL_IO_MAX_IOVEC 64
struct iovec iov[FTL_IO_MAX_IOVEC];
@ -173,7 +173,7 @@ struct ftl_io {
/* Position within the iovec */
size_t iov_pos;
/* Offset within the iovec (in lbks) */
/* Offset within the iovec (in blocks) */
size_t iov_off;
/* RWB entry (valid only for RWB-based IO) */
@ -252,7 +252,7 @@ static inline bool
ftl_io_done(const struct ftl_io *io)
{
return io->req_cnt == 0 &&
io->pos == io->lbk_cnt &&
io->pos == io->num_blocks &&
!(io->flags & FTL_IO_RETRY);
}
@ -269,20 +269,20 @@ void ftl_io_dec_req(struct ftl_io *io);
struct iovec *ftl_io_iovec(struct ftl_io *io);
uint64_t ftl_io_current_lba(const struct ftl_io *io);
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
void ftl_io_advance(struct ftl_io *io, size_t lbk_cnt);
size_t ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt);
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
void *ftl_io_iovec_addr(struct ftl_io *io);
size_t ftl_io_iovec_len_left(struct ftl_io *io);
struct ftl_io *ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
struct ftl_band *band,
struct ftl_rwb_batch *entry, ftl_io_fn cb);
struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb);
struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t lbk_cnt,
struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb);
struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
void *cb_arg, int type);
void *ftl_io_get_md(const struct ftl_io *io);
void ftl_io_complete(struct ftl_io *io);
void ftl_io_shrink_iovec(struct ftl_io *io, size_t lbk_cnt);
void ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks);
void ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status);
void ftl_io_reset(struct ftl_io *io);
void ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *));

View File

@ -68,7 +68,7 @@ struct ftl_reloc_move {
struct ftl_addr addr;
/* Number of logical blocks */
size_t lbk_cnt;
size_t num_blocks;
/* Data buffer */
void *data;
@ -87,7 +87,7 @@ struct ftl_band_reloc {
struct ftl_band *band;
/* Number of logical blocks to be relocated */
size_t num_lbks;
size_t num_blocks;
/* Bitmap of logical blocks to be relocated */
struct spdk_bit_array *reloc_map;
@ -170,21 +170,21 @@ ftl_reloc_iter_zone_offset(struct ftl_band_reloc *breloc)
static size_t
ftl_reloc_iter_zone_done(struct ftl_band_reloc *breloc)
{
size_t num_lbks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
size_t num_blocks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
return ftl_reloc_iter_zone_offset(breloc) == num_lbks;
return ftl_reloc_iter_zone_offset(breloc) == num_blocks;
}
static void
ftl_reloc_clr_lbk(struct ftl_band_reloc *breloc, size_t lbkoff)
ftl_reloc_clr_block(struct ftl_band_reloc *breloc, size_t block_off)
{
if (!spdk_bit_array_get(breloc->reloc_map, lbkoff)) {
if (!spdk_bit_array_get(breloc->reloc_map, block_off)) {
return;
}
spdk_bit_array_clear(breloc->reloc_map, lbkoff);
assert(breloc->num_lbks);
breloc->num_lbks--;
spdk_bit_array_clear(breloc->reloc_map, block_off);
assert(breloc->num_blocks);
breloc->num_blocks--;
}
static void
@ -205,8 +205,8 @@ ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc, struct ftl_reloc_move *mov
struct ftl_band *band = breloc->band;
breloc->num_outstanding++;
return ftl_band_read_lba_map(band, ftl_band_lbkoff_from_addr(band, move->addr),
move->lbk_cnt, ftl_reloc_read_lba_map_cb, move);
return ftl_band_read_lba_map(band, ftl_band_block_offset_from_addr(band, move->addr),
move->num_blocks, ftl_reloc_read_lba_map_cb, move);
}
static void
@ -264,10 +264,10 @@ ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status)
return;
}
for (i = 0; i < move->lbk_cnt; ++i) {
for (i = 0; i < move->num_blocks; ++i) {
addr.offset = move->addr.offset + i;
size_t lbkoff = ftl_band_lbkoff_from_addr(breloc->band, addr);
ftl_reloc_clr_lbk(breloc, lbkoff);
size_t block_off = ftl_band_block_offset_from_addr(breloc->band, addr);
ftl_reloc_clr_block(breloc, block_off);
}
ftl_reloc_free_move(breloc, move);
@ -304,7 +304,7 @@ ftl_reloc_iter_reset(struct ftl_band_reloc *breloc)
}
static size_t
ftl_reloc_iter_lbkoff(struct ftl_band_reloc *breloc)
ftl_reloc_iter_block_offset(struct ftl_band_reloc *breloc)
{
size_t zone_offset = breloc->iter.zone_current * ftl_get_num_blocks_in_zone(breloc->parent->dev);
@ -320,21 +320,21 @@ ftl_reloc_iter_next_zone(struct ftl_band_reloc *breloc)
}
static int
ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff)
ftl_reloc_block_valid(struct ftl_band_reloc *breloc, size_t block_off)
{
struct ftl_addr addr = ftl_band_addr_from_lbkoff(breloc->band, lbkoff);
struct ftl_addr addr = ftl_band_addr_from_block_offset(breloc->band, block_off);
return ftl_addr_is_written(breloc->band, addr) &&
spdk_bit_array_get(breloc->reloc_map, lbkoff) &&
ftl_band_lbkoff_valid(breloc->band, lbkoff);
spdk_bit_array_get(breloc->reloc_map, block_off) &&
ftl_band_block_offset_valid(breloc->band, block_off);
}
static int
ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *block_off)
{
size_t zone = breloc->iter.zone_current;
*lbkoff = ftl_reloc_iter_lbkoff(breloc);
*block_off = ftl_reloc_iter_block_offset(breloc);
if (ftl_reloc_iter_zone_done(breloc)) {
return 0;
@ -342,8 +342,8 @@ ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
breloc->iter.zone_offset[zone]++;
if (!ftl_reloc_lbk_valid(breloc, *lbkoff)) {
ftl_reloc_clr_lbk(breloc, *lbkoff);
if (!ftl_reloc_block_valid(breloc, *block_off)) {
ftl_reloc_clr_block(breloc, *block_off);
return 0;
}
@ -351,12 +351,12 @@ ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
}
static int
ftl_reloc_first_valid_lbk(struct ftl_band_reloc *breloc, size_t *lbkoff)
ftl_reloc_first_valid_block(struct ftl_band_reloc *breloc, size_t *block_off)
{
size_t i, num_lbks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
size_t i, num_blocks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
for (i = ftl_reloc_iter_zone_offset(breloc); i < num_lbks; ++i) {
if (ftl_reloc_iter_next(breloc, lbkoff)) {
for (i = ftl_reloc_iter_zone_offset(breloc); i < num_blocks; ++i) {
if (ftl_reloc_iter_next(breloc, block_off)) {
return 1;
}
}
@ -369,10 +369,10 @@ ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
{
size_t i;
size_t num_zones = ftl_get_num_punits(breloc->band->dev);
size_t num_lbks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
size_t num_blocks = ftl_get_num_blocks_in_zone(breloc->parent->dev);
for (i = 0; i < num_zones; ++i) {
if (breloc->iter.zone_offset[i] != num_lbks) {
if (breloc->iter.zone_offset[i] != num_blocks) {
return 0;
}
}
@ -381,49 +381,49 @@ ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
}
static size_t
ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc,
size_t num_lbk, struct ftl_addr *addr)
ftl_reloc_find_valid_blocks(struct ftl_band_reloc *breloc,
size_t _num_blocks, struct ftl_addr *addr)
{
size_t lbkoff, lbk_cnt = 0;
size_t block_off, num_blocks = 0;
if (!ftl_reloc_first_valid_lbk(breloc, &lbkoff)) {
if (!ftl_reloc_first_valid_block(breloc, &block_off)) {
return 0;
}
*addr = ftl_band_addr_from_lbkoff(breloc->band, lbkoff);
*addr = ftl_band_addr_from_block_offset(breloc->band, block_off);
for (lbk_cnt = 1; lbk_cnt < num_lbk; lbk_cnt++) {
if (!ftl_reloc_iter_next(breloc, &lbkoff)) {
for (num_blocks = 1; num_blocks < _num_blocks; num_blocks++) {
if (!ftl_reloc_iter_next(breloc, &block_off)) {
break;
}
}
return lbk_cnt;
return num_blocks;
}
static size_t
ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_addr *addr)
ftl_reloc_next_blocks(struct ftl_band_reloc *breloc, struct ftl_addr *addr)
{
size_t i, lbk_cnt = 0;
size_t i, num_blocks = 0;
struct spdk_ftl_dev *dev = breloc->parent->dev;
for (i = 0; i < ftl_get_num_punits(dev); ++i) {
lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, addr);
num_blocks = ftl_reloc_find_valid_blocks(breloc, breloc->parent->xfer_size, addr);
ftl_reloc_iter_next_zone(breloc);
if (lbk_cnt || ftl_reloc_iter_done(breloc)) {
if (num_blocks || ftl_reloc_iter_done(breloc)) {
break;
}
}
return lbk_cnt;
return num_blocks;
}
static struct ftl_io *
ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
ftl_io_fn fn, enum ftl_io_type io_type, int flags)
{
size_t lbkoff, i;
size_t block_off, i;
struct ftl_addr addr = move->addr;
struct ftl_io *io = NULL;
struct ftl_io_init_opts opts = {
@ -432,7 +432,7 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
.size = sizeof(*io),
.flags = flags | FTL_IO_INTERNAL | FTL_IO_PHYSICAL_MODE,
.type = io_type,
.lbk_cnt = move->lbk_cnt,
.num_blocks = move->num_blocks,
.data = move->data,
.cb_fn = fn,
};
@ -446,15 +446,15 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
io->addr = move->addr;
if (flags & FTL_IO_VECTOR_LBA) {
for (i = 0; i < io->lbk_cnt; ++i, ++addr.offset) {
lbkoff = ftl_band_lbkoff_from_addr(breloc->band, addr);
for (i = 0; i < io->num_blocks; ++i, ++addr.offset) {
block_off = ftl_band_block_offset_from_addr(breloc->band, addr);
if (!ftl_band_lbkoff_valid(breloc->band, lbkoff)) {
if (!ftl_band_block_offset_valid(breloc->band, block_off)) {
io->lba.vector[i] = FTL_LBA_INVALID;
continue;
}
io->lba.vector[i] = breloc->band->lba_map.map[lbkoff];
io->lba.vector[i] = breloc->band->lba_map.map[block_off];
}
}
@ -487,15 +487,15 @@ ftl_reloc_read(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
struct ftl_addr addr = {};
move->lbk_cnt = ftl_reloc_next_lbks(breloc, &addr);
move->num_blocks = ftl_reloc_next_blocks(breloc, &addr);
move->breloc = breloc;
move->addr = addr;
if (!move->lbk_cnt) {
if (!move->num_blocks) {
return 0;
}
move->data = spdk_dma_malloc(FTL_BLOCK_SIZE * move->lbk_cnt, 4096, NULL);
move->data = spdk_dma_malloc(FTL_BLOCK_SIZE * move->num_blocks, 4096, NULL);
if (!move->data) {
return -1;
}
@ -566,7 +566,7 @@ ftl_reloc_release(struct ftl_band_reloc *breloc)
if (breloc->state == FTL_BAND_RELOC_STATE_HIGH_PRIO) {
/* High prio band must be relocated as a whole and ANM events will be ignored */
assert(breloc->num_lbks == 0 && ftl_band_empty(band));
assert(breloc->num_blocks == 0 && ftl_band_empty(band));
TAILQ_REMOVE(&reloc->prio_queue, breloc, entry);
band->high_prio = 0;
breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
@ -576,7 +576,7 @@ ftl_reloc_release(struct ftl_band_reloc *breloc)
breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
/* If we got ANM event during relocation put such band back to pending queue */
if (breloc->num_lbks != 0) {
if (breloc->num_blocks != 0) {
breloc->state = FTL_BAND_RELOC_STATE_PENDING;
TAILQ_INSERT_TAIL(&reloc->pending_queue, breloc, entry);
return;
@ -789,7 +789,7 @@ ftl_reloc(struct ftl_reloc *reloc)
void
ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
size_t num_lbks, int prio, bool is_defrag)
size_t num_blocks, int prio, bool is_defrag)
{
struct ftl_band_reloc *breloc = &reloc->brelocs[band->id];
size_t i;
@ -812,17 +812,17 @@ ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
}
pthread_spin_unlock(&band->lba_map.lock);
for (i = offset; i < offset + num_lbks; ++i) {
for (i = offset; i < offset + num_blocks; ++i) {
if (spdk_bit_array_get(breloc->reloc_map, i)) {
continue;
}
spdk_bit_array_set(breloc->reloc_map, i);
breloc->num_lbks++;
breloc->num_blocks++;
}
/* If the band is coming from the defrag process, mark it appropriately */
if (is_defrag) {
assert(offset == 0 && num_lbks == ftl_get_num_blocks_in_band(band->dev));
assert(offset == 0 && num_blocks == ftl_get_num_blocks_in_band(band->dev));
reloc->num_defrag_bands++;
breloc->defrag = true;
}

View File

@ -43,7 +43,7 @@ struct ftl_band;
struct ftl_reloc *ftl_reloc_init(struct spdk_ftl_dev *dev);
void ftl_reloc_free(struct ftl_reloc *reloc);
void ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band,
size_t offset, size_t num_lbks, int prio, bool is_defrag);
size_t offset, size_t num_blocks, int prio, bool is_defrag);
void ftl_reloc(struct ftl_reloc *reloc);
void ftl_reloc_halt(struct ftl_reloc *reloc);
void ftl_reloc_resume(struct ftl_reloc *reloc);

View File

@ -190,7 +190,7 @@ ftl_restore_init(struct spdk_ftl_dev *dev, ftl_restore_fn cb)
}
/* Allocate buffer capable of holding head mds of all bands */
restore->md_buf = spdk_dma_zmalloc(ftl_get_num_bands(dev) * ftl_head_md_num_lbks(dev) *
restore->md_buf = spdk_dma_zmalloc(ftl_get_num_bands(dev) * ftl_head_md_num_blocks(dev) *
FTL_BLOCK_SIZE, 0, NULL);
if (!restore->md_buf) {
goto error;
@ -338,7 +338,7 @@ ftl_restore_head_md(void *ctx)
rband = &restore->bands[i];
lba_map = &rband->band->lba_map;
lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE;
lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_blocks(dev) * FTL_BLOCK_SIZE;
if (ftl_band_read_head_md(rband->band, ftl_restore_head_cb, rband)) {
if (spdk_likely(rband->band->num_zones)) {
@ -402,7 +402,7 @@ ftl_restore_l2p(struct ftl_band *band)
ftl_invalidate_addr(dev, addr);
}
addr = ftl_band_addr_from_lbkoff(band, i);
addr = ftl_band_addr_from_block_offset(band, i);
ftl_band_set_addr(band, lba, addr);
ftl_l2p_set(dev, lba, addr);
@ -700,7 +700,7 @@ ftl_nv_cache_alloc_io(struct ftl_nv_cache_block *block, uint64_t lba)
.io = NULL,
.flags = FTL_IO_BYPASS_CACHE,
.type = FTL_IO_WRITE,
.lbk_cnt = 1,
.num_blocks = 1,
.cb_fn = ftl_nv_cache_write_cb,
.cb_ctx = block,
.data = block->buf,
@ -1129,7 +1129,7 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
.size = sizeof(struct ftl_io),
.flags = flags,
.type = FTL_IO_WRITE,
.lbk_cnt = dev->xfer_size,
.num_blocks = dev->xfer_size,
.cb_fn = ftl_pad_zone_cb,
.cb_ctx = rband,
.data = buffer,
@ -1166,12 +1166,12 @@ ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status)
}
offset = io->addr.offset % ftl_get_num_blocks_in_zone(restore->dev);
if (offset + io->lbk_cnt == ftl_get_num_blocks_in_zone(restore->dev)) {
if (offset + io->num_blocks == ftl_get_num_blocks_in_zone(restore->dev)) {
zone = ftl_band_zone_from_addr(band, io->addr);
zone->info.state = SPDK_BDEV_ZONE_STATE_CLOSED;
} else {
struct ftl_addr addr = io->addr;
addr.offset += io->lbk_cnt;
addr.offset += io->num_blocks;
new_io = ftl_restore_init_pad_io(rband, io->iov[0].iov_base, addr);
if (spdk_unlikely(!new_io)) {
restore->pad_status = -ENOMEM;

View File

@ -224,7 +224,7 @@ ftl_trace_lba_io_init(struct spdk_ftl_dev *dev, const struct ftl_io *io)
}
}
spdk_trace_record(tpoint_id, io->trace, io->lbk_cnt, 0, ftl_io_get_lba(io, 0));
spdk_trace_record(tpoint_id, io->trace, io->num_blocks, 0, ftl_io_get_lba(io, 0));
}
void

View File

@ -489,13 +489,13 @@ bdev_ftl_create_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
ftl_bdev->dev = dev;
ftl_bdev->bdev.product_name = "FTL disk";
ftl_bdev->bdev.write_cache = 0;
ftl_bdev->bdev.blocklen = attrs.lbk_size;
ftl_bdev->bdev.blockcnt = attrs.lbk_cnt;
ftl_bdev->bdev.blocklen = attrs.block_size;
ftl_bdev->bdev.blockcnt = attrs.num_blocks;
ftl_bdev->bdev.uuid = attrs.uuid;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_FTL, "Creating bdev %s:\n", ftl_bdev->bdev.name);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_FTL, "\tblock_len:\t%zu\n", attrs.lbk_size);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_FTL, "\tblock_cnt:\t%"PRIu64"\n", attrs.lbk_cnt);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_FTL, "\tblock_len:\t%zu\n", attrs.block_size);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_FTL, "\tnum_blocks:\t%"PRIu64"\n", attrs.num_blocks);
ftl_bdev->bdev.ctxt = ftl_bdev;
ftl_bdev->bdev.fn_table = &ftl_fn_table;

View File

@ -81,7 +81,7 @@ addr_from_punit(uint64_t punit)
}
static void
test_band_lbkoff_from_addr_base(void)
test_band_block_offset_from_addr_base(void)
{
struct ftl_addr addr;
uint64_t offset, i, flat_lun = 0;
@ -91,7 +91,7 @@ test_band_lbkoff_from_addr_base(void)
addr = addr_from_punit(i);
addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
offset = ftl_band_lbkoff_from_addr(g_band, addr);
offset = ftl_band_block_offset_from_addr(g_band, addr);
CU_ASSERT_EQUAL(offset, flat_lun * ftl_get_num_blocks_in_zone(g_dev));
flat_lun++;
}
@ -99,7 +99,7 @@ test_band_lbkoff_from_addr_base(void)
}
static void
test_band_lbkoff_from_addr_offset(void)
test_band_block_offset_from_addr_offset(void)
{
struct ftl_addr addr;
uint64_t offset, expect, i, j;
@ -110,7 +110,7 @@ test_band_lbkoff_from_addr_offset(void)
addr = addr_from_punit(i);
addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
offset = ftl_band_lbkoff_from_addr(g_band, addr);
offset = ftl_band_block_offset_from_addr(g_band, addr);
expect = test_offset_from_addr(addr, g_band);
CU_ASSERT_EQUAL(offset, expect);
@ -120,7 +120,7 @@ test_band_lbkoff_from_addr_offset(void)
}
static void
test_band_addr_from_lbkoff(void)
test_band_addr_from_block_offset(void)
{
struct ftl_addr addr, expect;
uint64_t offset, i, j;
@ -131,8 +131,8 @@ test_band_addr_from_lbkoff(void)
expect = addr_from_punit(i);
expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;
offset = ftl_band_lbkoff_from_addr(g_band, expect);
addr = ftl_band_addr_from_lbkoff(g_band, offset);
offset = ftl_band_block_offset_from_addr(g_band, expect);
addr = ftl_band_addr_from_block_offset(g_band, offset);
CU_ASSERT_EQUAL(addr.offset, expect.offset);
}
@ -214,7 +214,7 @@ test_next_xfer_addr(void)
struct ftl_addr addr, result, expect;
setup_band();
/* Verify simple one lbk incremention */
/* Verify simple one block incremention */
addr = addr_from_punit(0);
addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
expect = addr;
@ -296,12 +296,12 @@ main(int argc, char **argv)
}
if (
CU_add_test(suite, "test_band_lbkoff_from_addr_base",
test_band_lbkoff_from_addr_base) == NULL
|| CU_add_test(suite, "test_band_lbkoff_from_addr_offset",
test_band_lbkoff_from_addr_offset) == NULL
|| CU_add_test(suite, "test_band_addr_from_lbkoff",
test_band_addr_from_lbkoff) == NULL
CU_add_test(suite, "test_band_block_offset_from_addr_base",
test_band_block_offset_from_addr_base) == NULL
|| CU_add_test(suite, "test_band_block_offset_from_addr_offset",
test_band_block_offset_from_addr_offset) == NULL
|| CU_add_test(suite, "test_band_addr_from_block_offset",
test_band_addr_from_block_offset) == NULL
|| CU_add_test(suite, "test_band_set_addr",
test_band_set_addr) == NULL
|| CU_add_test(suite, "test_invalidate_addr",

View File

@ -104,7 +104,7 @@ test_md_unpack_fail(void)
/* check crc */
ftl_pack_tail_md(band);
/* flip last bit of lba_map */
*((char *)lba_map->dma_buf + ftl_tail_md_num_lbks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
*((char *)lba_map->dma_buf + ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_CRC);
/* check invalid version */

View File

@ -83,31 +83,31 @@ ftl_band_acquire_lba_map(struct ftl_band *band)
}
size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev)
{
return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}
int
ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
size_t lbk_cnt, ftl_io_fn fn, void *ctx)
size_t num_blocks, ftl_io_fn fn, void *ctx)
{
fn(ctx, ctx, 0);
return 0;
}
uint64_t
ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr)
ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
{
return test_offset_from_addr(addr, band);
}
struct ftl_addr
ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
{
struct ftl_addr addr = {};
addr.offset = lbkoff + band->id * ftl_get_num_blocks_in_band(band->dev);
addr.offset = block_off + band->id * ftl_get_num_blocks_in_band(band->dev);
return addr;
}
@ -142,11 +142,11 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
io->flags = opts->flags;
io->cb_fn = opts->cb_fn;
io->cb_ctx = io;
io->lbk_cnt = opts->lbk_cnt;
io->num_blocks = opts->num_blocks;
io->iov[0].iov_base = opts->data;
if (opts->flags & FTL_IO_VECTOR_LBA) {
io->lba.vector = calloc(io->lbk_cnt, sizeof(uint64_t));
io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
SPDK_CU_ASSERT_FATAL(io->lba.vector != NULL);
}
@ -234,13 +234,13 @@ cleanup_reloc(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc)
}
static void
set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_lbks)
set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_blocks)
{
struct ftl_lba_map *lba_map = &band->lba_map;
size_t i;
SPDK_CU_ASSERT_FATAL(lba_map != NULL);
for (i = offset; i < offset + num_lbks; ++i) {
for (i = offset; i < offset + num_blocks; ++i) {
spdk_bit_array_set(lba_map->vld, i);
lba_map->num_vld++;
}
@ -249,7 +249,7 @@ set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_lbks)
static void
test_reloc_iter_full(void)
{
size_t num_lbks, num_iters, reminder, i;
size_t num_blocks, num_iters, reminder, i;
struct spdk_ftl_dev *dev;
struct ftl_reloc *reloc;
struct ftl_band_reloc *breloc;
@ -266,30 +266,30 @@ test_reloc_iter_full(void)
ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_get_num_blocks_in_band(dev));
CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
num_iters = ftl_get_num_punits(dev) *
(ftl_get_num_blocks_in_zone(dev) / reloc->xfer_size);
for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &addr);
CU_ASSERT_EQUAL(num_lbks, reloc->xfer_size);
num_blocks = ftl_reloc_next_blocks(breloc, &addr);
CU_ASSERT_EQUAL(num_blocks, reloc->xfer_size);
}
num_iters = ftl_get_num_punits(dev);
/* ftl_reloc_next_lbks is searching for maximum xfer_size */
/* ftl_reloc_next_blocks is searching for maximum xfer_size */
/* contiguous valid logic blocks in zone, so we can end up */
/* with some reminder if number of logical blocks in zone */
/* is not divisible by xfer_size */
reminder = ftl_get_num_blocks_in_zone(dev) % reloc->xfer_size;
for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &addr);
CU_ASSERT_EQUAL(reminder, num_lbks);
num_blocks = ftl_reloc_next_blocks(breloc, &addr);
CU_ASSERT_EQUAL(reminder, num_blocks);
}
/* num_lbks should remain intact since all the blocks are valid */
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_get_num_blocks_in_band(dev));
/* num_blocks should remain intact since all the blocks are valid */
CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
cleanup_reloc(dev, reloc);
@ -310,7 +310,7 @@ test_reloc_empty_band(void)
ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
CU_ASSERT_EQUAL(breloc->num_lbks, 0);
CU_ASSERT_EQUAL(breloc->num_blocks, 0);
cleanup_reloc(dev, reloc);
}
@ -322,7 +322,7 @@ test_reloc_full_band(void)
struct ftl_reloc *reloc;
struct ftl_band_reloc *breloc;
struct ftl_band *band;
size_t num_moves, num_iters, num_lbk, i;
size_t num_moves, num_iters, num_block, i;
setup_reloc(&dev, &reloc, &g_geo);
@ -335,24 +335,24 @@ test_reloc_full_band(void)
ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_get_num_blocks_in_band(dev));
CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
ftl_reloc_prep(breloc);
add_to_active_queue(reloc, breloc);
for (i = 1; i <= num_iters; ++i) {
single_reloc_move(breloc);
num_lbk = ftl_get_num_blocks_in_band(dev) - (i * num_moves);
CU_ASSERT_EQUAL(breloc->num_lbks, num_lbk);
num_block = ftl_get_num_blocks_in_band(dev) - (i * num_moves);
CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
}
/* Process reminder lbks */
/* Process reminder blocks */
single_reloc_move(breloc);
/* Drain move queue */
ftl_reloc_process_moves(breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, 0);
CU_ASSERT_EQUAL(breloc->num_blocks, 0);
CU_ASSERT_TRUE(ftl_reloc_done(breloc));
ftl_reloc_release(breloc);
@ -384,14 +384,14 @@ test_reloc_scatter_band(void)
ftl_reloc_prep(breloc);
add_to_active_queue(reloc, breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_get_num_blocks_in_band(dev));
CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
for (i = 0; i < num_iters ; ++i) {
single_reloc_move(breloc);
}
ftl_process_reloc(breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, 0);
CU_ASSERT_EQUAL(breloc->num_blocks, 0);
CU_ASSERT_TRUE(ftl_reloc_done(breloc));
cleanup_reloc(dev, reloc);
@ -404,7 +404,7 @@ test_reloc_zone(void)
struct ftl_reloc *reloc;
struct ftl_band_reloc *breloc;
struct ftl_band *band;
size_t num_io, num_iters, num_lbk, i;
size_t num_io, num_iters, num_block, i;
setup_reloc(&dev, &reloc, &g_geo);
@ -422,21 +422,21 @@ test_reloc_zone(void)
ftl_get_num_blocks_in_zone(dev), 1, false);
add_to_active_queue(reloc, breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_get_num_blocks_in_zone(dev));
CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_zone(dev));
for (i = 1; i <= num_iters ; ++i) {
single_reloc_move(breloc);
num_lbk = ftl_get_num_blocks_in_zone(dev) - (i * num_io);
num_block = ftl_get_num_blocks_in_zone(dev) - (i * num_io);
CU_ASSERT_EQUAL(breloc->num_lbks, num_lbk);
CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
}
/* In case num_lbks_in_zone % num_io != 0 one extra iteration is needed */
/* In case num_blocks_in_zone % num_io != 0 one extra iteration is needed */
single_reloc_move(breloc);
/* Drain move queue */
ftl_reloc_process_moves(breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, 0);
CU_ASSERT_EQUAL(breloc->num_blocks, 0);
CU_ASSERT_TRUE(ftl_reloc_done(breloc));
ftl_reloc_release(breloc);
@ -444,7 +444,7 @@ test_reloc_zone(void)
}
static void
test_reloc_single_lbk(void)
test_reloc_single_block(void)
{
struct spdk_ftl_dev *dev;
struct ftl_reloc *reloc;
@ -464,13 +464,13 @@ test_reloc_single_lbk(void)
ftl_reloc_prep(breloc);
add_to_active_queue(reloc, breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, 1);
CU_ASSERT_EQUAL(breloc->num_blocks, 1);
single_reloc_move(breloc);
/* Drain move queue */
ftl_reloc_process_moves(breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, 0);
CU_ASSERT_EQUAL(breloc->num_blocks, 0);
CU_ASSERT_TRUE(ftl_reloc_done(breloc));
ftl_reloc_release(breloc);
@ -504,8 +504,8 @@ main(int argc, char **argv)
test_reloc_scatter_band) == NULL
|| CU_add_test(suite, "test_reloc_zone",
test_reloc_zone) == NULL
|| CU_add_test(suite, "test_reloc_single_lbk",
test_reloc_single_lbk) == NULL
|| CU_add_test(suite, "test_reloc_single_block",
test_reloc_single_block) == NULL
) {
CU_cleanup_registry();
return CU_get_error();

View File

@ -57,7 +57,7 @@ DEFINE_STUB_V(ftl_io_fail, (struct ftl_io *io, int status));
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
enum ftl_trace_completion completion));
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
size_t num_lbks, int prio, bool defrag));
size_t num_blocks, int prio, bool defrag));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
struct ftl_addr addr, size_t addr_cnt));
@ -74,7 +74,7 @@ DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
struct ftl_io *io;
@ -84,15 +84,15 @@ ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
io->dev = band->dev;
io->band = band;
io->cb_fn = cb;
io->lbk_cnt = 1;
io->num_blocks = 1;
return io;
}
void
ftl_io_advance(struct ftl_io *io, size_t lbk_cnt)
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
io->pos += lbk_cnt;
io->pos += num_blocks;
}
void
@ -139,7 +139,7 @@ test_wptr(void)
struct ftl_band *band;
struct ftl_io io = { 0 };
size_t xfer_size;
size_t zone, lbk, offset, i;
size_t zone, block, offset, i;
int rc;
setup_wptr_test(&dev, &g_geo);
@ -154,7 +154,7 @@ test_wptr(void)
io.band = band;
io.dev = dev;
for (lbk = 0, offset = 0; lbk < ftl_get_num_blocks_in_zone(dev) / xfer_size; ++lbk) {
for (block = 0, offset = 0; block < ftl_get_num_blocks_in_zone(dev) / xfer_size; ++block) {
for (zone = 0; zone < band->num_zones; ++zone) {
CU_ASSERT_EQUAL(wptr->offset, offset);
ftl_wptr_advance(wptr, xfer_size);