lib/ftl: Remove ftl_md struct from band

Keep the LBA map related fields in a separate struct stored directly
in the band. Clean up the interfaces that depended on ftl_md.

The LBA map structure will be extended in the next commit.

Change-Id: I1cfc2f2ff0c0e90bb63f39808780845673002e70
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/453370
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Wojciech Malikowski 2019-04-26 10:53:13 -04:00 committed by Darek Stojaczyk
parent d87493378d
commit 95ace66f8d
17 changed files with 262 additions and 277 deletions
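
In short, this change splits the old per-band struct ftl_md into counters kept
directly in struct ftl_band plus a new struct ftl_lba_map embedded in the band.
A simplified before/after sketch of the layout, with unrelated fields elided
(based on the ftl_band.h hunk further down):

/* Before: everything bundled in the per-band metadata struct */
struct ftl_md {
	uint64_t		seq;		/* Sequence number */
	uint64_t		wr_cnt;		/* Number of defrag cycles */
	pthread_spinlock_t	lock;		/* LBA/vld map lock */
	size_t			ref_cnt;
	struct spdk_bit_array	*vld_map;	/* Bitmap of valid LBAs */
	uint64_t		*lba_map;	/* LBA map */
	void			*dma_buf;	/* Metadata DMA buffer */
};

/* After: band-level counters move into ftl_band, the rest becomes ftl_lba_map */
struct ftl_lba_map {
	pthread_spinlock_t	lock;		/* LBA/vld map lock */
	size_t			ref_cnt;
	struct spdk_bit_array	*vld;		/* Bitmap of valid LBAs */
	uint64_t		*map;		/* LBA map */
	void			*dma_buf;	/* Metadata DMA buffer */
};

struct ftl_band {
	/* ... */
	struct ftl_lba_map	lba_map;	/* LBA map */
	uint64_t		seq;		/* Sequence number */
	uint64_t		wr_cnt;		/* Number of defrag cycles */
	/* ... */
};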

View File

@ -149,34 +149,36 @@ ftl_band_write_failed(struct ftl_band *band)
}
void
ftl_band_clear_md(struct ftl_band *band)
ftl_band_clear_lba_map(struct ftl_band *band)
{
spdk_bit_array_clear_mask(band->md.vld_map);
memset(band->md.lba_map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
band->md.num_vld = 0;
struct ftl_lba_map *lba_map = &band->lba_map;
spdk_bit_array_clear_mask(lba_map->vld);
memset(lba_map->map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
lba_map->num_vld = 0;
}
static void
ftl_band_free_md(struct ftl_band *band)
ftl_band_free_lba_map(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
assert(band->state == FTL_BAND_STATE_CLOSED ||
band->state == FTL_BAND_STATE_FREE);
assert(md->ref_cnt == 0);
assert(md->lba_map != NULL);
assert(lba_map->ref_cnt == 0);
assert(lba_map->map != NULL);
assert(!band->high_prio);
/* Verify that band's metadata is consistent with l2p */
if (band->num_chunks) {
assert(ftl_band_validate_md(band, band->md.lba_map) == true);
assert(ftl_band_validate_md(band) == true);
}
spdk_mempool_put(dev->lba_pool, md->lba_map);
spdk_dma_free(md->dma_buf);
md->lba_map = NULL;
md->dma_buf = NULL;
spdk_mempool_put(dev->lba_pool, lba_map->map);
spdk_dma_free(lba_map->dma_buf);
lba_map->map = NULL;
lba_map->dma_buf = NULL;
}
static void
@ -194,7 +196,7 @@ _ftl_band_set_free(struct ftl_band *band)
/* Keep the list sorted by band's write count */
LIST_FOREACH(lband, &dev->free_bands, list_entry) {
if (lband->md.wr_cnt > band->md.wr_cnt) {
if (lband->wr_cnt > band->wr_cnt) {
LIST_INSERT_BEFORE(lband, band, list_entry);
break;
}
@ -215,7 +217,7 @@ _ftl_band_set_free(struct ftl_band *band)
if (!prev) {
continue;
}
assert(prev->md.wr_cnt <= lband->md.wr_cnt);
assert(prev->wr_cnt <= lband->wr_cnt);
}
#endif
dev->num_free++;
@ -226,12 +228,11 @@ static void
_ftl_band_set_preparing(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md *md = &band->md;
/* Remove band from free list */
LIST_REMOVE(band, list_entry);
md->wr_cnt++;
band->wr_cnt++;
assert(dev->num_free > 0);
dev->num_free--;
@ -248,8 +249,8 @@ _ftl_band_set_closed(struct ftl_band *band)
/* Set the state as free_md() checks for that */
band->state = FTL_BAND_STATE_CLOSED;
/* Free the md if there are no outstanding IOs */
ftl_band_release_md(band);
/* Free the lba map if there are no outstanding IOs */
ftl_band_release_lba_map(band);
if (spdk_likely(band->num_chunks)) {
LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
@ -273,49 +274,51 @@ ftl_md_calc_crc(const struct ftl_md_hdr *hdr, size_t size)
}
static void
ftl_set_md_hdr(struct spdk_ftl_dev *dev, struct ftl_md_hdr *hdr,
struct ftl_md *md, size_t size)
ftl_set_md_hdr(struct ftl_band *band, struct ftl_md_hdr *hdr, size_t size)
{
hdr->seq = md->seq;
hdr->seq = band->seq;
hdr->ver = FTL_MD_VER;
hdr->uuid = dev->uuid;
hdr->uuid = band->dev->uuid;
hdr->checksum = ftl_md_calc_crc(hdr, size);
}
static int
ftl_pack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
ftl_pack_head_md(struct ftl_band *band)
{
struct ftl_head_md *head = data;
struct spdk_ftl_dev *dev = band->dev;
struct ftl_head_md *head = band->lba_map.dma_buf;
head->wr_cnt = md->wr_cnt;
head->wr_cnt = band->wr_cnt;
head->lba_cnt = dev->num_lbas;
head->xfer_size = dev->xfer_size;
ftl_set_md_hdr(dev, &head->hdr, md, sizeof(struct ftl_head_md));
ftl_set_md_hdr(band, &head->hdr, sizeof(struct ftl_head_md));
return FTL_MD_SUCCESS;
}
static int
ftl_pack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
ftl_pack_tail_md(struct ftl_band *band)
{
struct ftl_tail_md *tail = data;
struct spdk_ftl_dev *dev = band->dev;
struct ftl_lba_map *lba_map = &band->lba_map;
struct ftl_tail_md *tail = lba_map->dma_buf;
size_t map_size;
void *vld_offset, *map_offset;
map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;
/* Clear out the buffer */
memset(data, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
memset(tail, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
tail->num_lbks = ftl_num_band_lbks(dev);
pthread_spin_lock(&md->lock);
spdk_bit_array_store_mask(md->vld_map, vld_offset);
pthread_spin_unlock(&md->lock);
pthread_spin_lock(&lba_map->lock);
spdk_bit_array_store_mask(lba_map->vld, vld_offset);
pthread_spin_unlock(&lba_map->lock);
memcpy(map_offset, md->lba_map, map_size);
ftl_set_md_hdr(dev, &tail->hdr, md, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
memcpy(map_offset, lba_map->map, map_size);
ftl_set_md_hdr(band, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
return FTL_MD_SUCCESS;
}
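
For reference, the tail metadata buffer that ftl_pack_tail_md()/ftl_unpack_tail_md()
serialize keeps the same layout as before; a sketch inferred from the
vld_offset/map_offset arithmetic above:

/*
 * lba_map->dma_buf layout used by ftl_pack_tail_md()/ftl_unpack_tail_md():
 *
 *   +------------------------------+  <- dma_buf
 *   | struct ftl_tail_md header    |  ftl_tail_md_hdr_num_lbks() blocks
 *   +------------------------------+  <- vld_offset
 *   | valid LBA bitmap (vld)       |  ftl_vld_map_num_lbks(dev) blocks
 *   +------------------------------+  <- map_offset
 *   | LBA map (map)                |  ftl_num_band_lbks(dev) * sizeof(uint64_t)
 *   +------------------------------+
 *
 *   total size: ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE
 */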
@ -339,15 +342,17 @@ ftl_md_hdr_vld(struct spdk_ftl_dev *dev, const struct ftl_md_hdr *hdr, size_t si
}
static int
ftl_unpack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
ftl_unpack_tail_md(struct ftl_band *band)
{
struct ftl_tail_md *tail = data;
struct spdk_ftl_dev *dev = band->dev;
size_t map_size;
void *vld_offset, *map_offset;
struct ftl_lba_map *lba_map = &band->lba_map;
struct ftl_tail_md *tail = lba_map->dma_buf;
int rc;
map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
vld_offset = (char *)data + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;
rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
@ -359,22 +364,23 @@ ftl_unpack_tail_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
return FTL_MD_INVALID_SIZE;
}
if (md->vld_map) {
spdk_bit_array_load_mask(md->vld_map, vld_offset);
if (lba_map->vld) {
spdk_bit_array_load_mask(lba_map->vld, vld_offset);
}
if (md->lba_map) {
memcpy(md->lba_map, map_offset, map_size);
if (lba_map->map) {
memcpy(lba_map->map, map_offset, map_size);
}
md->seq = tail->hdr.seq;
band->seq = tail->hdr.seq;
return FTL_MD_SUCCESS;
}
static int
ftl_unpack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
ftl_unpack_head_md(struct ftl_band *band)
{
struct ftl_head_md *head = data;
struct spdk_ftl_dev *dev = band->dev;
struct ftl_head_md *head = band->lba_map.dma_buf;
int rc;
rc = ftl_md_hdr_vld(dev, &head->hdr, sizeof(struct ftl_head_md));
@ -382,8 +388,8 @@ ftl_unpack_head_md(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data)
return rc;
}
md->seq = head->hdr.seq;
md->wr_cnt = head->wr_cnt;
band->seq = head->hdr.seq;
band->wr_cnt = head->wr_cnt;
if (dev->global_md.num_lbas == 0) {
dev->global_md.num_lbas = head->lba_cnt;
@ -476,25 +482,25 @@ ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
{
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
uint64_t offset;
assert(lba != FTL_LBA_INVALID);
offset = ftl_band_lbkoff_from_ppa(band, ppa);
pthread_spin_lock(&band->md.lock);
pthread_spin_lock(&lba_map->lock);
md->num_vld++;
md->lba_map[offset] = lba;
spdk_bit_array_set(md->vld_map, offset);
lba_map->num_vld++;
lba_map->map[offset] = lba;
spdk_bit_array_set(lba_map->vld, offset);
pthread_spin_unlock(&band->md.lock);
pthread_spin_unlock(&lba_map->lock);
}
size_t
ftl_band_age(const struct ftl_band *band)
{
return (size_t)(band->dev->seq - band->md.seq);
return (size_t)(band->dev->seq - band->seq);
}
size_t
@ -670,48 +676,48 @@ ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
}
void
ftl_band_acquire_md(struct ftl_band *band)
ftl_band_acquire_lba_map(struct ftl_band *band)
{
assert(band->md.lba_map != NULL);
band->md.ref_cnt++;
assert(band->lba_map.map != NULL);
band->lba_map.ref_cnt++;
}
int
ftl_band_alloc_md(struct ftl_band *band)
ftl_band_alloc_lba_map(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
assert(md->ref_cnt == 0);
assert(md->lba_map == NULL);
assert(lba_map->ref_cnt == 0);
assert(lba_map->map == NULL);
md->lba_map = spdk_mempool_get(dev->lba_pool);
if (!md->lba_map) {
lba_map->map = spdk_mempool_get(dev->lba_pool);
if (!lba_map->map) {
return -1;
}
md->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
lba_map->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
FTL_BLOCK_SIZE, NULL);
if (!md->dma_buf) {
spdk_mempool_put(dev->lba_pool, md->lba_map);
if (!lba_map->dma_buf) {
spdk_mempool_put(dev->lba_pool, lba_map->map);
return -1;
}
ftl_band_acquire_md(band);
ftl_band_acquire_lba_map(band);
return 0;
}
void
ftl_band_release_md(struct ftl_band *band)
ftl_band_release_lba_map(struct ftl_band *band)
{
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
assert(band->md.lba_map != NULL);
assert(md->ref_cnt > 0);
md->ref_cnt--;
assert(lba_map->map != NULL);
assert(lba_map->ref_cnt > 0);
lba_map->ref_cnt--;
if (md->ref_cnt == 0) {
ftl_band_free_md(band);
if (lba_map->ref_cnt == 0) {
ftl_band_free_lba_map(band);
}
}
@ -721,9 +727,7 @@ ftl_read_md_cb(void *arg, int status)
struct ftl_md_io *md_io = arg;
if (!status) {
status = md_io->pack_fn(md_io->io.dev,
md_io->md,
md_io->buf);
status = md_io->pack_fn(md_io->io.band);
} else {
status = FTL_MD_IO_FAILURE;
}
@ -732,7 +736,7 @@ ftl_read_md_cb(void *arg, int status)
}
static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, struct ftl_ppa ppa,
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
struct ftl_band *band, size_t lbk_cnt, spdk_ftl_fn fn,
ftl_md_pack_fn pack_fn, struct ftl_cb cb)
{
@ -747,7 +751,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, str
.type = FTL_IO_READ,
.lbk_cnt = lbk_cnt,
.fn = fn,
.data = data,
.data = band->lba_map.dma_buf,
};
io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
@ -756,8 +760,6 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_md *md, void *data, str
}
io->io.ppa = ppa;
io->md = md;
io->buf = data;
io->pack_fn = pack_fn;
io->cb = cb;
@ -792,24 +794,24 @@ ftl_band_write_md(struct ftl_band *band, size_t lbk_cnt,
struct spdk_ftl_dev *dev = band->dev;
struct ftl_io *io;
io = ftl_io_init_md_write(dev, band, band->md.dma_buf, lbk_cnt, cb);
io = ftl_io_init_md_write(dev, band, band->lba_map.dma_buf, lbk_cnt, cb);
if (!io) {
return -ENOMEM;
}
md_fn(dev, &band->md, band->md.dma_buf);
md_fn(band);
ftl_io_write(io);
return 0;
}
void
ftl_band_md_clear(struct ftl_md *md)
ftl_band_md_clear(struct ftl_band *band)
{
md->seq = 0;
md->num_vld = 0;
md->wr_cnt = 0;
md->lba_map = NULL;
band->seq = 0;
band->wr_cnt = 0;
band->lba_map.num_vld = 0;
band->lba_map.map = NULL;
}
int
@ -836,9 +838,8 @@ ftl_band_lba_map_ppa(struct ftl_band *band, size_t offset)
}
static int
ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data,
size_t lbk_cnt, struct ftl_ppa start_ppa, spdk_ftl_fn fn,
ftl_md_pack_fn pack_fn, const struct ftl_cb cb)
ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa,
spdk_ftl_fn fn, ftl_md_pack_fn pack_fn, struct ftl_cb cb)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md_io *io;
@ -847,7 +848,7 @@ ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data,
return -ENOENT;
}
io = ftl_io_init_md_read(dev, md, data, start_ppa, band, lbk_cnt, fn, pack_fn, cb);
io = ftl_io_init_md_read(dev, start_ppa, band, lbk_cnt, fn, pack_fn, cb);
if (!io) {
return -ENOMEM;
}
@ -857,15 +858,10 @@ ftl_band_read_md(struct ftl_band *band, struct ftl_md *md, void *data,
}
int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_md *md,
void *data, struct ftl_ppa ppa, struct ftl_cb cb)
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa ppa, struct ftl_cb cb)
{
return ftl_band_read_md(band, md, data,
ftl_tail_md_num_lbks(band->dev),
ppa,
ftl_read_md_cb,
ftl_unpack_tail_md,
cb);
return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), ppa,
ftl_read_md_cb, ftl_unpack_tail_md, cb);
}
static size_t
@ -885,14 +881,14 @@ ftl_read_lba_map_cb(void *arg, int status)
{
struct ftl_md_io *md_io = arg;
struct ftl_io *io = &md_io->io;
struct ftl_md *md = md_io->md;
struct ftl_lba_map *lba_map = &io->band->lba_map;
uint64_t offset;
offset = ftl_lba_map_offset_from_ppa(io->band, io->ppa);
assert(offset + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));
if (!status) {
memcpy((char *)md->lba_map + offset * FTL_BLOCK_SIZE, md->dma_buf,
memcpy((char *)lba_map->map + offset * FTL_BLOCK_SIZE, lba_map->dma_buf,
io->lbk_cnt * FTL_BLOCK_SIZE);
}
@ -900,7 +896,7 @@ ftl_read_lba_map_cb(void *arg, int status)
}
int
ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md, size_t offset, size_t lba_cnt,
ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
struct ftl_cb cb)
{
size_t lbk_cnt, lbk_off;
@ -910,16 +906,14 @@ ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md, size_t offset, s
assert(lbk_off + lbk_cnt <= ftl_lba_map_num_lbks(band->dev));
return ftl_band_read_md(band, md, md->dma_buf,
lbk_cnt, ftl_band_lba_map_ppa(band, lbk_off),
return ftl_band_read_md(band, lbk_cnt, ftl_band_lba_map_ppa(band, lbk_off),
ftl_read_lba_map_cb, NULL, cb);
}
int
ftl_band_read_head_md(struct ftl_band *band, struct ftl_md *md,
void *data, struct ftl_cb cb)
ftl_band_read_head_md(struct ftl_band *band, struct ftl_cb cb)
{
return ftl_band_read_md(band, md, data,
return ftl_band_read_md(band,
ftl_head_md_num_lbks(band->dev),
ftl_band_head_md_ppa(band),
ftl_read_md_cb,
@ -1002,11 +996,11 @@ ftl_band_write_prep(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
if (ftl_band_alloc_md(band)) {
if (ftl_band_alloc_lba_map(band)) {
return -1;
}
band->md.seq = ++dev->seq;
band->seq = ++dev->seq;
return 0;
}
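
The renamed helpers keep the same reference-counting scheme the ftl_md variants
used; a hedged sketch of how the calls pair up across this diff (a summary, not
code from the commit itself):

/*
 * ftl_band_write_prep()
 *     -> ftl_band_alloc_lba_map()      gets lba_map.map from dev->lba_pool and
 *                                      lba_map.dma_buf via spdk_dma_zmalloc(),
 *                                      then takes the initial reference
 *
 * ftl_io_inc_req()  (write IOs)
 *     -> ftl_band_acquire_lba_map()    lba_map.ref_cnt++
 * ftl_io_dec_req()
 *     -> ftl_band_release_lba_map()    lba_map.ref_cnt--
 *
 * _ftl_band_set_closed()
 *     -> ftl_band_release_lba_map()    drops the initial reference; once the
 *        -> ftl_band_free_lba_map()    count reaches 0 the map returns to the
 *                                      pool and the DMA buffer is freed
 */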

View File

@ -84,13 +84,7 @@ enum ftl_md_status {
FTL_MD_INVALID_SIZE
};
struct ftl_md {
/* Sequence number */
uint64_t seq;
/* Number of defrag cycles */
uint64_t wr_cnt;
struct ftl_lba_map {
/* LBA/vld map lock */
pthread_spinlock_t lock;
@ -101,10 +95,10 @@ struct ftl_md {
size_t ref_cnt;
/* Bitmap of valid LBAs */
struct spdk_bit_array *vld_map;
struct spdk_bit_array *vld;
/* LBA map (only valid for open/relocating bands) */
uint64_t *lba_map;
uint64_t *map;
/* Metadata DMA buffer (only valid for open/relocating bands) */
void *dma_buf;
@ -134,8 +128,8 @@ struct ftl_band {
/* List of operational chunks */
CIRCLEQ_HEAD(, ftl_chunk) chunks;
/* Band's metadata */
struct ftl_md md;
/* LBA map */
struct ftl_lba_map lba_map;
/* Band's state */
enum ftl_band_state state;
@ -150,6 +144,12 @@ struct ftl_band {
/* the band should be defragged immediately */
int high_prio;
/* Sequence number */
uint64_t seq;
/* Number of defrag cycles */
uint64_t wr_cnt;
/* End metadata start ppa */
struct ftl_ppa tail_md_ppa;
@ -164,9 +164,13 @@ uint64_t ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa);
struct ftl_ppa ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff);
void ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state);
size_t ftl_band_age(const struct ftl_band *band);
void ftl_band_acquire_md(struct ftl_band *band);
int ftl_band_alloc_md(struct ftl_band *band);
void ftl_band_release_md(struct ftl_band *band);
void ftl_band_acquire_lba_map(struct ftl_band *band);
int ftl_band_alloc_lba_map(struct ftl_band *band);
void ftl_band_clear_lba_map(struct ftl_band *band);
void ftl_band_release_lba_map(struct ftl_band *band);
int ftl_band_read_lba_map(struct ftl_band *band,
size_t offset, size_t lba_cnt,
struct ftl_cb cb);
struct ftl_ppa ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa,
size_t num_lbks);
struct ftl_ppa ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa,
@ -178,19 +182,15 @@ void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
struct ftl_ppa ppa);
struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
struct ftl_chunk *ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa);
void ftl_band_md_clear(struct ftl_md *md);
int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_md *md,
void *data, struct ftl_ppa, struct ftl_cb cb);
int ftl_band_read_head_md(struct ftl_band *band, struct ftl_md *md,
void *data, struct ftl_cb cb);
int ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md,
size_t offset, size_t lba_cnt, struct ftl_cb cb);
void ftl_band_md_clear(struct ftl_band *band);
int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa,
struct ftl_cb cb);
int ftl_band_read_head_md(struct ftl_band *band, struct ftl_cb cb);
int ftl_band_write_tail_md(struct ftl_band *band, spdk_ftl_fn cb);
int ftl_band_write_head_md(struct ftl_band *band, spdk_ftl_fn cb);
struct ftl_ppa ftl_band_tail_md_ppa(struct ftl_band *band);
struct ftl_ppa ftl_band_head_md_ppa(struct ftl_band *band);
void ftl_band_write_failed(struct ftl_band *band);
void ftl_band_clear_md(struct ftl_band *band);
int ftl_band_full(struct ftl_band *band, size_t offset);
int ftl_band_erase(struct ftl_band *band);
int ftl_band_write_prep(struct ftl_band *band);
@ -200,7 +200,7 @@ struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band,
static inline int
ftl_band_empty(const struct ftl_band *band)
{
return band->md.num_vld == 0;
return band->lba_map.num_vld == 0;
}
static inline struct ftl_chunk *
@ -226,15 +226,15 @@ ftl_band_state_changing(struct ftl_band *band)
static inline int
ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
{
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
pthread_spin_lock(&md->lock);
if (spdk_bit_array_get(md->vld_map, lbkoff)) {
pthread_spin_unlock(&md->lock);
pthread_spin_lock(&lba_map->lock);
if (spdk_bit_array_get(lba_map->vld, lbkoff)) {
pthread_spin_unlock(&lba_map->lock);
return 1;
}
pthread_spin_unlock(&md->lock);
pthread_spin_unlock(&lba_map->lock);
return 0;
}

View File

@ -269,9 +269,9 @@ ftl_wptr_open_band(struct ftl_wptr *wptr)
struct ftl_band *band = wptr->band;
assert(ftl_band_chunk_is_first(band, wptr->chunk));
assert(band->md.num_vld == 0);
assert(band->lba_map.num_vld == 0);
ftl_band_clear_md(band);
ftl_band_clear_lba_map(band);
assert(band->state == FTL_BAND_STATE_PREP);
ftl_band_set_state(band, FTL_BAND_STATE_OPENING);
@ -675,17 +675,17 @@ static int
ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
struct ftl_band *band = ftl_band_from_ppa(dev, ppa);
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
uint64_t offset;
offset = ftl_band_lbkoff_from_ppa(band, ppa);
/* The bit might be already cleared if two writes are scheduled to the */
/* same LBA at the same time */
if (spdk_bit_array_get(md->vld_map, offset)) {
assert(md->num_vld > 0);
spdk_bit_array_clear(md->vld_map, offset);
md->num_vld--;
if (spdk_bit_array_get(lba_map->vld, offset)) {
assert(lba_map->num_vld > 0);
spdk_bit_array_clear(lba_map->vld, offset);
lba_map->num_vld--;
return 1;
}
@ -701,9 +701,9 @@ ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
assert(!ftl_ppa_cached(ppa));
band = ftl_band_from_ppa(dev, ppa);
pthread_spin_lock(&band->md.lock);
pthread_spin_lock(&band->lba_map.lock);
rc = ftl_invalidate_addr_unlocked(dev, ppa);
pthread_spin_unlock(&band->md.lock);
pthread_spin_unlock(&band->lba_map.lock);
return rc;
}
@ -1142,7 +1142,7 @@ ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
/* the L2P as well as metadata. The valid bits in metadata are used to */
/* check weak writes validity. */
band = ftl_band_from_ppa(dev, prev_ppa);
pthread_spin_lock(&band->md.lock);
pthread_spin_lock(&band->lba_map.lock);
valid = ftl_invalidate_addr_unlocked(dev, prev_ppa);
@ -1152,7 +1152,7 @@ ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
ftl_l2p_set(dev, entry->lba, ppa);
}
pthread_spin_unlock(&band->md.lock);
pthread_spin_unlock(&band->lba_map.lock);
}
static struct ftl_io *
@ -1496,7 +1496,7 @@ ftl_band_calc_merit(struct ftl_band *band, size_t *threshold_valid)
return 0.0;
}
valid = threshold_valid ? (usable - *threshold_valid) : band->md.num_vld;
valid = threshold_valid ? (usable - *threshold_valid) : band->lba_map.num_vld;
invalid = usable - valid;
/* Add one to avoid division by 0 */

View File

@ -50,24 +50,24 @@ static const char *ftl_band_state_str[] = {
};
bool
ftl_band_validate_md(struct ftl_band *band, const uint64_t *lba_map)
ftl_band_validate_md(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
struct ftl_ppa ppa_md, ppa_l2p;
size_t i, size;
bool valid = true;
size = ftl_num_band_lbks(dev);
pthread_spin_lock(&md->lock);
pthread_spin_lock(&lba_map->lock);
for (i = 0; i < size; ++i) {
if (!spdk_bit_array_get(md->vld_map, i)) {
if (!spdk_bit_array_get(lba_map->vld, i)) {
continue;
}
ppa_md = ftl_band_ppa_from_lbkoff(band, i);
ppa_l2p = ftl_l2p_get(dev, lba_map[i]);
ppa_l2p = ftl_l2p_get(dev, lba_map->map[i]);
if (ppa_l2p.cached) {
continue;
@ -80,7 +80,7 @@ ftl_band_validate_md(struct ftl_band *band, const uint64_t *lba_map)
}
pthread_spin_unlock(&md->lock);
pthread_spin_unlock(&lba_map->lock);
return valid;
}
@ -97,7 +97,7 @@ ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
ftl_debug("Bands validity:\n");
for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
if (dev->bands[i].state == FTL_BAND_STATE_FREE &&
dev->bands[i].md.wr_cnt == 0) {
dev->bands[i].wr_cnt == 0) {
continue;
}
@ -106,13 +106,13 @@ ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
continue;
}
total += dev->bands[i].md.num_vld;
total += dev->bands[i].lba_map.num_vld;
ftl_debug(" Band %3zu: %8zu / %zu \tnum_chunks: %zu \twr_cnt: %"PRIu64"\tmerit:"
"%10.3f\tstate: %s\n",
i + 1, dev->bands[i].md.num_vld,
i + 1, dev->bands[i].lba_map.num_vld,
ftl_band_user_lbks(&dev->bands[i]),
dev->bands[i].num_chunks,
dev->bands[i].md.wr_cnt,
dev->bands[i].wr_cnt,
dev->bands[i].merit,
ftl_band_state_str[dev->bands[i].state]);
}
@ -141,7 +141,7 @@ ftl_dev_dump_stats(const struct spdk_ftl_dev *dev)
/* Count the number of valid LBAs */
for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
total += dev->bands[i].md.num_vld;
total += dev->bands[i].lba_map.num_vld;
}
waf = (double)dev->stats.write_total / (double)dev->stats.write_user;

View File

@ -59,10 +59,10 @@ ftl_ppa2str(struct ftl_ppa ppa, char *buf, size_t size)
}
#if defined(FTL_META_DEBUG)
bool ftl_band_validate_md(struct ftl_band *band, const uint64_t *lba_map);
bool ftl_band_validate_md(struct ftl_band *band);
void ftl_dev_dump_bands(struct spdk_ftl_dev *dev);
#else
#define ftl_band_validate_md(band, lba_map)
#define ftl_band_validate_md(band)
#define ftl_dev_dump_bands(dev)
#endif

View File

@ -106,15 +106,15 @@ ftl_admin_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
static int
ftl_band_init_md(struct ftl_band *band)
{
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
md->vld_map = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
if (!md->vld_map) {
lba_map->vld = spdk_bit_array_create(ftl_num_band_lbks(band->dev));
if (!lba_map->vld) {
return -ENOMEM;
}
pthread_spin_init(&md->lock, PTHREAD_PROCESS_PRIVATE);
ftl_band_md_clear(&band->md);
pthread_spin_init(&lba_map->lock, PTHREAD_PROCESS_PRIVATE);
ftl_band_md_clear(band);
return 0;
}
@ -543,8 +543,8 @@ ftl_dev_band_max_seq(struct spdk_ftl_dev *dev)
size_t seq = 0;
LIST_FOREACH(band, &dev->shut_bands, list_entry) {
if (band->md.seq > seq) {
seq = band->md.seq;
if (band->seq > seq) {
seq = band->seq;
}
}
@ -560,7 +560,7 @@ _ftl_init_bands_state(void *ctx)
dev->seq = ftl_dev_band_max_seq(dev);
LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
if (!band->md.num_vld) {
if (!band->lba_map.num_vld) {
ftl_band_set_state(band, FTL_BAND_STATE_FREE);
}
}
@ -578,7 +578,7 @@ ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
int cnt = 0;
LIST_FOREACH(band, &dev->shut_bands, list_entry) {
if (band->num_chunks && !band->md.num_vld) {
if (band->num_chunks && !band->lba_map.num_vld) {
cnt++;
}
}
@ -1041,7 +1041,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
if (dev->bands) {
for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
free(dev->bands[i].chunk_buf);
spdk_bit_array_free(&dev->bands[i].md.vld_map);
spdk_bit_array_free(&dev->bands[i].lba_map.vld);
}
}

View File

@ -47,7 +47,7 @@ ftl_io_inc_req(struct ftl_io *io)
struct ftl_band *band = io->band;
if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
ftl_band_acquire_md(band);
ftl_band_acquire_lba_map(band);
}
__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
@ -62,7 +62,7 @@ ftl_io_dec_req(struct ftl_io *io)
unsigned long num_inflight __attribute__((unused));
if (!(io->flags & FTL_IO_CACHE) && io->type != FTL_IO_READ && io->type != FTL_IO_ERASE) {
ftl_band_release_md(band);
ftl_band_release_lba_map(band);
}
num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

View File

@ -45,9 +45,8 @@ struct spdk_ftl_dev;
struct ftl_rwb_batch;
struct ftl_band;
struct ftl_io;
struct ftl_md;
typedef int (*ftl_md_pack_fn)(struct spdk_ftl_dev *, struct ftl_md *, void *);
typedef int (*ftl_md_pack_fn)(struct ftl_band *);
/* IO flags */
enum ftl_io_flags {
@ -220,12 +219,6 @@ struct ftl_md_io {
/* Parent IO structure */
struct ftl_io io;
/* Destination metadata pointer */
struct ftl_md *md;
/* Metadata's buffer */
void *buf;
/* Serialization/deserialization callback */
ftl_md_pack_fn pack_fn;

View File

@ -185,12 +185,11 @@ ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc)
io->cb.ctx = io;
io->cb.fn = ftl_reloc_read_lba_map_cb;
if (ftl_band_alloc_md(band)) {
if (ftl_band_alloc_lba_map(band)) {
assert(false);
}
return ftl_band_read_lba_map(band, &band->md, 0,
ftl_num_band_lbks(dev), io->cb);
return ftl_band_read_lba_map(band, 0, ftl_num_band_lbks(dev), io->cb);
}
static void
@ -203,7 +202,7 @@ ftl_reloc_prep(struct ftl_band_reloc *breloc)
reloc->num_active++;
if (!band->high_prio) {
assert(band->md.lba_map == NULL);
assert(band->lba_map.map == NULL);
ftl_reloc_read_lba_map(breloc);
return;
}
@ -406,7 +405,7 @@ ftl_reloc_io_reinit(struct ftl_io *io, struct ftl_band_reloc *breloc,
continue;
}
io->lba.vector[i] = breloc->band->md.lba_map[lbkoff];
io->lba.vector[i] = breloc->band->lba_map.map[lbkoff];
}
ftl_trace_lba_io_init(io->dev, io);
@ -544,7 +543,7 @@ ftl_reloc_release(struct ftl_band_reloc *breloc)
ftl_reloc_release_io(breloc);
ftl_reloc_iter_reset(breloc);
ftl_band_release_md(band);
ftl_band_release_lba_map(band);
breloc->active = 0;
reloc->num_active--;
@ -790,6 +789,6 @@ ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
if (prio) {
TAILQ_INSERT_TAIL(&reloc->prio_queue, breloc, entry);
ftl_band_acquire_md(breloc->band);
ftl_band_acquire_lba_map(breloc->band);
}
}

View File

@ -145,8 +145,8 @@ ftl_restore_complete(struct ftl_restore *restore, int status)
static int
ftl_band_cmp(const void *lband, const void *rband)
{
uint64_t lseq = ((struct ftl_restore_band *)lband)->band->md.seq;
uint64_t rseq = ((struct ftl_restore_band *)rband)->band->md.seq;
uint64_t lseq = ((struct ftl_restore_band *)lband)->band->seq;
uint64_t rseq = ((struct ftl_restore_band *)rband)->band->seq;
if (lseq < rseq) {
return -1;
@ -171,7 +171,7 @@ ftl_restore_check_seq(const struct ftl_restore *restore)
}
next_band = LIST_NEXT(rband->band, list_entry);
if (next_band && rband->band->md.seq == next_band->md.seq) {
if (next_band && rband->band->seq == next_band->seq) {
return -1;
}
}
@ -256,8 +256,8 @@ ftl_restore_head_md(struct ftl_restore *restore)
{
struct spdk_ftl_dev *dev = restore->dev;
struct ftl_restore_band *rband;
struct ftl_lba_map *lba_map;
struct ftl_cb cb;
char *head_buf = restore->md_buf;
unsigned int num_failed = 0, num_ios;
size_t i;
@ -266,9 +266,12 @@ ftl_restore_head_md(struct ftl_restore *restore)
for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
rband = &restore->bands[i];
lba_map = &rband->band->lba_map;
cb.ctx = rband;
if (ftl_band_read_head_md(rband->band, &rband->band->md, head_buf, cb)) {
lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE;
if (ftl_band_read_head_md(rband->band, cb)) {
if (spdk_likely(rband->band->num_chunks)) {
SPDK_ERRLOG("Failed to read metadata on band %zu\n", i);
@ -283,8 +286,6 @@ ftl_restore_head_md(struct ftl_restore *restore)
num_failed++;
}
head_buf += ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE;
}
if (spdk_unlikely(num_failed > 0)) {
@ -320,11 +321,11 @@ ftl_restore_l2p(struct ftl_band *band)
size_t i;
for (i = 0; i < ftl_num_band_lbks(band->dev); ++i) {
if (!spdk_bit_array_get(band->md.vld_map, i)) {
if (!spdk_bit_array_get(band->lba_map.vld, i)) {
continue;
}
lba = band->md.lba_map[i];
lba = band->lba_map.map[i];
if (lba >= dev->num_lbas) {
return -1;
}
@ -340,7 +341,7 @@ ftl_restore_l2p(struct ftl_band *band)
ftl_l2p_set(dev, lba, ppa);
}
band->md.lba_map = NULL;
band->lba_map.map = NULL;
return 0;
}
@ -397,9 +398,10 @@ ftl_restore_tail_md(struct ftl_restore_band *rband)
};
band->tail_md_ppa = ftl_band_tail_md_ppa(band);
band->md.lba_map = restore->lba_map;
band->lba_map.map = restore->lba_map;
band->lba_map.dma_buf = restore->md_buf;
if (ftl_band_read_tail_md(band, &band->md, restore->md_buf, band->tail_md_ppa, cb)) {
if (ftl_band_read_tail_md(band, band->tail_md_ppa, cb)) {
SPDK_ERRLOG("Failed to send tail metadata read\n");
ftl_restore_complete(restore, -EIO);
return -EIO;

View File

@ -180,7 +180,7 @@ ftl_trace_defrag_band(struct spdk_ftl_dev *dev, const struct ftl_band *band)
struct ftl_trace *trace = &dev->stats.trace;
spdk_trace_record(FTL_TRACE_BAND_DEFRAG(FTL_TRACE_SOURCE_INTERNAL),
ftl_trace_next_id(trace), 0, band->md.num_vld, band->id);
ftl_trace_next_id(trace), 0, band->lba_map.num_vld, band->id);
}
void

View File

@ -95,8 +95,8 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
CIRCLEQ_INIT(&band->chunks);
band->md.vld_map = spdk_bit_array_create(ftl_num_band_lbks(dev));
SPDK_CU_ASSERT_FATAL(band->md.vld_map != NULL);
band->lba_map.vld = spdk_bit_array_create(ftl_num_band_lbks(dev));
SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);
band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
SPDK_CU_ASSERT_FATAL(band->chunk_buf != NULL);
@ -112,7 +112,7 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
band->num_chunks++;
}
pthread_spin_init(&band->md.lock, PTHREAD_PROCESS_PRIVATE);
pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);
return band;
}
@ -132,10 +132,10 @@ void
test_free_ftl_band(struct ftl_band *band)
{
SPDK_CU_ASSERT_FATAL(band != NULL);
spdk_bit_array_free(&band->md.vld_map);
spdk_bit_array_free(&band->lba_map.vld);
free(band->chunk_buf);
free(band->md.lba_map);
spdk_dma_free(band->md.dma_buf);
free(band->lba_map.map);
spdk_dma_free(band->lba_map.dma_buf);
}
uint64_t

View File

@ -67,7 +67,7 @@ setup_band(void)
g_dev = test_init_ftl_dev(&g_geo, &g_range);
g_band = test_init_ftl_band(g_dev, TEST_BAND_IDX);
rc = ftl_band_alloc_md(g_band);
rc = ftl_band_alloc_lba_map(g_band);
CU_ASSERT_EQUAL_FATAL(rc, 0);
}
@ -153,68 +153,68 @@ test_band_ppa_from_lbkoff(void)
static void
test_band_set_addr(void)
{
struct ftl_md *md;
struct ftl_lba_map *lba_map;
struct ftl_ppa ppa;
uint64_t offset = 0;
setup_band();
md = &g_band->md;
lba_map = &g_band->lba_map;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
CU_ASSERT_EQUAL(md->num_vld, 0);
CU_ASSERT_EQUAL(lba_map->num_vld, 0);
offset = test_offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_EQUAL(md->lba_map[offset], TEST_LBA);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
ppa.pu++;
offset = test_offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa);
CU_ASSERT_EQUAL(md->num_vld, 2);
CU_ASSERT_EQUAL(md->lba_map[offset], TEST_LBA + 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
CU_ASSERT_EQUAL(lba_map->num_vld, 2);
CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
ppa.pu--;
offset = test_offset_from_ppa(ppa, g_band);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
cleanup_band();
}
static void
test_invalidate_addr(void)
{
struct ftl_md *md;
struct ftl_lba_map *lba_map;
struct ftl_ppa ppa;
uint64_t offset[2];
setup_band();
md = &g_band->md;
lba_map = &g_band->lba_map;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
offset[0] = test_offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
ftl_invalidate_addr(g_band->dev, ppa);
CU_ASSERT_EQUAL(md->num_vld, 0);
CU_ASSERT_FALSE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_EQUAL(lba_map->num_vld, 0);
CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0]));
offset[0] = test_offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
ppa.pu++;
offset[1] = test_offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa);
CU_ASSERT_EQUAL(md->num_vld, 2);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[1]));
CU_ASSERT_EQUAL(lba_map->num_vld, 2);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1]));
ftl_invalidate_addr(g_band->dev, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_FALSE(spdk_bit_array_get(md->vld_map, offset[1]));
CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1]));
cleanup_band();
}

View File

@ -39,8 +39,8 @@
#include "ftl/ftl_io.c"
DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_band_acquire_md, (struct ftl_band *band));
DEFINE_STUB_V(ftl_band_release_md, (struct ftl_band *band));
DEFINE_STUB_V(ftl_band_acquire_lba_map, (struct ftl_band *band));
DEFINE_STUB_V(ftl_band_release_lba_map, (struct ftl_band *band));
static struct spdk_ftl_dev *
setup_device(void)

View File

@ -62,9 +62,9 @@ setup_band(struct ftl_band **band, const struct spdk_ocssd_geometry_data *geo,
dev = test_init_ftl_dev(geo, range);
*band = test_init_ftl_band(dev, 0);
rc = ftl_band_alloc_md(*band);
rc = ftl_band_alloc_lba_map(*band);
SPDK_CU_ASSERT_FATAL(rc == 0);
ftl_band_clear_md(*band);
ftl_band_clear_lba_map(*band);
}
static void
@ -80,19 +80,18 @@ static void
test_md_unpack(void)
{
struct ftl_band *band;
struct ftl_md *md;
struct ftl_lba_map *lba_map;
setup_band(&band, &g_geo, &g_range);
md = &band->md;
lba_map = &band->lba_map;
SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
SPDK_CU_ASSERT_FATAL(md->dma_buf);
ftl_pack_head_md(band);
CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_SUCCESS);
ftl_pack_head_md(band->dev, md, md->dma_buf);
CU_ASSERT_EQUAL(ftl_unpack_head_md(band->dev, md, md->dma_buf), FTL_MD_SUCCESS);
ftl_pack_tail_md(band->dev, md, md->dma_buf);
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band->dev, md, md->dma_buf), FTL_MD_SUCCESS);
ftl_pack_tail_md(band);
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_SUCCESS);
cleanup_band(band);
}
@ -101,36 +100,35 @@ static void
test_md_unpack_fail(void)
{
struct ftl_band *band;
struct ftl_md *md;
struct ftl_lba_map *lba_map;
struct ftl_md_hdr *hdr;
setup_band(&band, &g_geo, &g_range);
md = &band->md;
SPDK_CU_ASSERT_FATAL(md->dma_buf);
lba_map = &band->lba_map;
SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);
/* check crc */
ftl_pack_tail_md(band->dev, md, md->dma_buf);
ftl_pack_tail_md(band);
/* flip last bit of lba_map */
*((char *)md->dma_buf + ftl_tail_md_num_lbks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band->dev, md, md->dma_buf), FTL_MD_INVALID_CRC);
*((char *)lba_map->dma_buf + ftl_tail_md_num_lbks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_CRC);
/* check invalid version */
hdr = md->dma_buf;
ftl_pack_tail_md(band->dev, md, md->dma_buf);
hdr = lba_map->dma_buf;
ftl_pack_tail_md(band);
hdr->ver++;
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band->dev, md, md->dma_buf), FTL_MD_INVALID_VER);
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_VER);
/* check wrong UUID */
ftl_pack_head_md(band->dev, md, md->dma_buf);
ftl_pack_head_md(band);
hdr->uuid.u.raw[0] ^= 0x1;
CU_ASSERT_EQUAL(ftl_unpack_head_md(band->dev, md, md->dma_buf), FTL_MD_NO_MD);
CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_NO_MD);
/* check invalid size */
ftl_pack_tail_md(band->dev, md, md->dma_buf);
ftl_pack_tail_md(band);
band->dev->geo.clba--;
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band->dev, md, md->dma_buf), FTL_MD_INVALID_SIZE);
CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_SIZE);
cleanup_band(band);
}

View File

@ -61,31 +61,30 @@ DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state st
DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
int
ftl_band_alloc_md(struct ftl_band *band)
ftl_band_alloc_lba_map(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_md *md = &band->md;
ftl_band_acquire_md(band);
md->lba_map = spdk_mempool_get(dev->lba_pool);
ftl_band_acquire_lba_map(band);
band->lba_map.map = spdk_mempool_get(dev->lba_pool);
return 0;
}
void
ftl_band_release_md(struct ftl_band *band)
ftl_band_release_lba_map(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
band->md.ref_cnt--;
spdk_mempool_put(dev->lba_pool, band->md.lba_map);
band->md.lba_map = NULL;
band->lba_map.ref_cnt--;
spdk_mempool_put(dev->lba_pool, band->lba_map.map);
band->lba_map.map = NULL;
}
void
ftl_band_acquire_md(struct ftl_band *band)
ftl_band_acquire_lba_map(struct ftl_band *band)
{
band->md.ref_cnt++;
band->lba_map.ref_cnt++;
}
size_t
@ -95,8 +94,8 @@ ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
}
int
ftl_band_read_lba_map(struct ftl_band *band, struct ftl_md *md,
size_t offset, size_t lbk_cnt, struct ftl_cb cb)
ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
size_t lbk_cnt, struct ftl_cb cb)
{
cb.fn(cb.ctx, 0);
return 0;
@ -222,13 +221,13 @@ cleanup_reloc(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc)
static void
set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_lbks)
{
struct ftl_md *md = &band->md;
struct ftl_lba_map *lba_map = &band->lba_map;
size_t i;
SPDK_CU_ASSERT_FATAL(md != NULL);
SPDK_CU_ASSERT_FATAL(lba_map != NULL);
for (i = offset; i < offset + num_lbks; ++i) {
spdk_bit_array_set(md->vld_map, i);
md->num_vld++;
spdk_bit_array_set(lba_map->vld, i);
lba_map->num_vld++;
}
}

View File

@ -55,7 +55,7 @@ static struct spdk_ftl_punit_range g_range = {
};
#if defined(DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band, const uint64_t *lba_map), true);
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));