ftl: P2L checkpointing

Since the base device is not required to support VSS, FTL introduces a
mechanism that allows recovering both the P2L map and the write pointer
of open bands after a dirty shutdown. After each 1MiB of data written to
a band, a 4KiB block describing its P2L is persisted to the cache
device, effectively emulating VSS for the base device.
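
For scale, the arithmetic behind those two numbers, as a minimal sketch; it assumes FTL_NUM_LBA_IN_BLOCK is derived from the 16-byte ftl_p2l_map_entry (lba + seq_id) the way the rest of this patch uses it:

#include <assert.h>
#include <stdint.h>

/* Assumed entry layout - matches the lba/seq_id pairs written by
 * ftl_band_set_p2l() in this patch. */
struct ftl_p2l_map_entry {
	uint64_t lba;
	uint64_t seq_id;
};

#define FTL_BLOCK_SIZE		4096ULL
#define FTL_NUM_LBA_IN_BLOCK	(FTL_BLOCK_SIZE / sizeof(struct ftl_p2l_map_entry))

int
main(void)
{
	/* One 4KiB checkpoint page holds 256 entries, and each entry
	 * describes one 4KiB user block: 256 * 4KiB = 1MiB of band data
	 * per persisted page. */
	assert(FTL_NUM_LBA_IN_BLOCK == 256);
	assert(FTL_NUM_LBA_IN_BLOCK * FTL_BLOCK_SIZE == 1024ULL * 1024ULL);
	return 0;
}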

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Change-Id: Ic6be52dc09b237297a5cda3e752d6c038e98b70e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13367
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Artur Paszkiewicz (2022-07-07 10:32:30 -07:00), committed by Jim Harris
parent 36049672a3
commit 1738488e41
22 changed files with 957 additions and 19 deletions

lib/ftl/Makefile

@@ -27,9 +27,10 @@ FTL_SUBDIRS := mngt utils
C_SRCS = ftl_core.c ftl_init.c ftl_layout.c ftl_debug.c ftl_io.c ftl_sb.c ftl_l2p.c ftl_l2p_flat.c
C_SRCS += ftl_nv_cache.c ftl_band.c ftl_band_ops.c ftl_writer.c ftl_rq.c ftl_reloc.c ftl_l2p_cache.c
C_SRCS += ftl_p2l.c
C_SRCS += mngt/ftl_mngt.c mngt/ftl_mngt_bdev.c mngt/ftl_mngt_shutdown.c mngt/ftl_mngt_startup.c
C_SRCS += mngt/ftl_mngt_md.c mngt/ftl_mngt_misc.c mngt/ftl_mngt_ioch.c mngt/ftl_mngt_l2p.c
C_SRCS += mngt/ftl_mngt_band.c mngt/ftl_mngt_self_test.c
C_SRCS += mngt/ftl_mngt_band.c mngt/ftl_mngt_self_test.c mngt/ftl_mngt_p2l.c
C_SRCS += utils/ftl_conf.c utils/ftl_md.c utils/ftl_mempool.c utils/ftl_bitmap.c
SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_ftl.map)

lib/ftl/ftl_band.c

@@ -106,7 +106,9 @@ _ftl_band_set_closed_cb(struct ftl_band *band, bool valid)
band->owner.state_change_fn(band);
}
/* Free the p2l map if there are no outstanding IOs */
ftl_p2l_validate_ckpt(band);
/* Free the P2L map if there are no outstanding IOs */
ftl_band_release_p2l_map(band);
assert(band->p2l_map.ref_cnt == 0);
@@ -186,7 +188,7 @@ ftl_band_set_type(struct ftl_band *band, enum ftl_band_type type)
}
void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, ftl_addr addr)
ftl_band_set_p2l(struct ftl_band *band, uint64_t lba, ftl_addr addr, uint64_t seq_id)
{
struct ftl_p2l_map *p2l_map = &band->p2l_map;
uint64_t offset;
@@ -194,7 +196,13 @@ ftl_band_set_addr(struct ftl_band *band, uint64_t lba, ftl_addr addr)
offset = ftl_band_block_offset_from_addr(band, addr);
p2l_map->band_map[offset].lba = lba;
p2l_map->num_valid++;
p2l_map->band_map[offset].seq_id = seq_id;
}
void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, ftl_addr addr)
{
band->p2l_map.num_valid++;
ftl_bitmap_set(band->dev->valid_map, addr);
}
@@ -374,6 +382,10 @@ ftl_band_release_p2l_map(struct ftl_band *band)
p2l_map->ref_cnt--;
if (p2l_map->ref_cnt == 0) {
if (p2l_map->p2l_ckpt) {
ftl_p2l_ckpt_release(band->dev, p2l_map->p2l_ckpt);
p2l_map->p2l_ckpt = NULL;
}
ftl_band_free_p2l_map(band);
ftl_band_free_md_entry(band);
}
@@ -394,6 +406,8 @@ ftl_band_write_prep(struct ftl_band *band)
return -1;
}
band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire(dev);
band->md->p2l_md_region = ftl_p2l_ckpt_region_type(band->p2l_map.p2l_ckpt);
ftl_band_iter_init(band);
band->md->seq = ftl_get_next_seq_id(dev);

lib/ftl/ftl_band.h

@@ -17,7 +17,7 @@
#include "utils/ftl_df.h"
#define FTL_MAX_OPEN_BANDS 4
#define FTL_MAX_OPEN_BANDS FTL_LAYOUT_REGION_TYPE_P2L_COUNT
#define FTL_BAND_VERSION_0 0
#define FTL_BAND_VERSION_1 1
@@ -61,6 +61,9 @@ struct ftl_band_md {
/* Band type set during opening */
enum ftl_band_type type;
/* nv_cache p2l md region associated with band */
enum ftl_layout_region_type p2l_md_region;
/* Sequence ID when band was opened */
uint64_t seq;
@@ -157,6 +160,7 @@ ftl_addr ftl_band_next_xfer_addr(struct ftl_band *band, ftl_addr addr, size_t nu
ftl_addr ftl_band_next_addr(struct ftl_band *band, ftl_addr addr, size_t offset);
size_t ftl_band_user_blocks_left(const struct ftl_band *band, size_t offset);
size_t ftl_band_user_blocks(const struct ftl_band *band);
void ftl_band_set_p2l(struct ftl_band *band, uint64_t lba, ftl_addr addr, uint64_t seq_id);
void ftl_band_set_addr(struct ftl_band *band, uint64_t lba, ftl_addr addr);
struct ftl_band *ftl_band_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr);
ftl_addr ftl_band_tail_md_addr(struct ftl_band *band);

lib/ftl/ftl_band_ops.c

@@ -15,14 +15,11 @@ static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
struct ftl_rq *rq = arg;
struct ftl_band *band = rq->io.band;
rq->success = success;
assert(band->queue_depth > 0);
band->queue_depth--;
ftl_p2l_ckpt_issue(rq);
rq->owner.cb(rq);
spdk_bdev_free_io(bdev_io);
}

lib/ftl/ftl_core.h

@@ -167,6 +167,14 @@ struct spdk_ftl_dev {
/* Retry init sequence */
bool init_retry;
/* P2L checkpointing */
struct {
/* Free regions */
TAILQ_HEAD(, ftl_p2l_ckpt) free;
/* In use regions */
TAILQ_HEAD(, ftl_p2l_ckpt) inuse;
} p2l_ckpt;
};
void ftl_apply_limits(struct spdk_ftl_dev *dev);
@@ -200,12 +208,8 @@ ftl_addr_get_band(const struct spdk_ftl_dev *dev, ftl_addr addr)
static inline uint32_t
ftl_get_write_unit_size(struct spdk_bdev *bdev)
{
if (spdk_bdev_is_zoned(bdev)) {
return spdk_bdev_get_write_unit_size(bdev);
}
/* TODO: this should be passed via input parameter */
return 32;
/* Full block of P2L map worth of xfer_sz is needed for P2L checkpointing */
return FTL_NUM_LBA_IN_BLOCK;
}
static inline struct spdk_thread *

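Pinning the write unit size here is not arbitrary: with xfer_size equal to FTL_NUM_LBA_IN_BLOCK, every completed write unit maps onto exactly one checkpoint page and never straddles two. A small sketch of that invariant (the constant's value, 256, is an assumption; see the derivation above):

#include <assert.h>
#include <stdint.h>

#define FTL_NUM_LBA_IN_BLOCK 256ULL /* assumed: P2L entries per 4KiB page */

/* Same page-number derivation as in ftl_p2l_ckpt_issue(). */
static uint64_t
p2l_page_no(uint64_t band_offs)
{
	return band_offs / FTL_NUM_LBA_IN_BLOCK;
}

int
main(void)
{
	uint64_t xfer_size = FTL_NUM_LBA_IN_BLOCK;

	/* Mirrors the assert in ftl_p2l_ckpt_issue(): the first and last
	 * block of a write request always land on the same P2L page. */
	for (uint64_t offs = 0; offs < 16 * xfer_size; offs += xfer_size) {
		assert(p2l_page_no(offs) == p2l_page_no(offs + xfer_size - 1));
	}
	return 0;
}
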
lib/ftl/ftl_internal.h

@@ -20,6 +20,11 @@
/* Smallest data unit size */
#define FTL_BLOCK_SIZE 4096ULL
#define FTL_P2L_VERSION_0 0
#define FTL_P2L_VERSION_1 1
#define FTL_P2L_VERSION_CURRENT FTL_P2L_VERSION_1
/*
* This type represents an address in the FTL address space. Values from 0 to the base bdev size are
* mapped directly to base device lbas. Values above that represent nv cache lbas.
@@ -90,9 +95,61 @@ struct ftl_p2l_map {
struct ftl_nv_cache_chunk_md *chunk_dma_md;
};
/* P2L checkpointing region */
struct ftl_p2l_ckpt *p2l_ckpt;
};
struct ftl_p2l_sync_ctx {
struct ftl_band *band;
uint64_t page_start;
uint64_t page_end;
int md_region;
};
struct ftl_p2l_ckpt_page {
struct ftl_p2l_map_entry map[FTL_NUM_LBA_IN_BLOCK];
};
struct ftl_p2l_ckpt;
struct ftl_band;
struct spdk_ftl_dev;
struct ftl_mngt_process;
struct ftl_rq;
int ftl_p2l_ckpt_init(struct spdk_ftl_dev *dev);
void ftl_p2l_ckpt_deinit(struct spdk_ftl_dev *dev);
void ftl_p2l_ckpt_issue(struct ftl_rq *rq);
struct ftl_p2l_ckpt *ftl_p2l_ckpt_acquire(struct spdk_ftl_dev *dev);
struct ftl_p2l_ckpt *ftl_p2l_ckpt_acquire_region_type(struct spdk_ftl_dev *dev,
uint32_t region_type);
void ftl_p2l_ckpt_release(struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt);
enum ftl_layout_region_type ftl_p2l_ckpt_region_type(const struct ftl_p2l_ckpt *ckpt);
#if defined(DEBUG)
void ftl_p2l_validate_ckpt(struct ftl_band *band);
#else
static inline void
ftl_p2l_validate_ckpt(struct ftl_band *band)
{
}
#endif
int ftl_mngt_p2l_ckpt_get_seq_id(struct spdk_ftl_dev *dev, int md_region);
int ftl_mngt_p2l_ckpt_restore(struct ftl_band *band, uint32_t md_region, uint64_t seq_id);
int ftl_mngt_p2l_ckpt_restore_clean(struct ftl_band *band);
void ftl_mngt_p2l_ckpt_restore_shm_clean(struct ftl_band *band);
void ftl_mngt_persist_bands_p2l(struct ftl_mngt_process *mngt);
struct ftl_reloc *ftl_reloc_init(struct spdk_ftl_dev *dev);

lib/ftl/ftl_l2p_cache.c

@@ -47,6 +47,7 @@ struct ftl_l2p_page {
struct ftl_l2p_cache_page_io_ctx ctx;
bool on_lru_list;
void *page_buffer;
uint64_t ckpt_seq_id;
ftl_df_obj_id obj_id;
};

lib/ftl/ftl_layout.c

@@ -127,9 +127,16 @@ set_region_bdev_btm(struct ftl_layout_region *reg, struct spdk_ftl_dev *dev)
static int
setup_layout_nvc(struct spdk_ftl_dev *dev)
{
int region_type;
uint64_t left, offset = 0;
struct ftl_layout *layout = &dev->layout;
struct ftl_layout_region *region, *mirror;
static const char *p2l_region_name[] = {
"p2l0",
"p2l1",
"p2l2",
"p2l3"
};
#ifdef SPDK_FTL_VSS_EMU
/* Skip the already init'd VSS region */
@@ -191,6 +198,30 @@ setup_layout_nvc(struct spdk_ftl_dev *dev)
goto error;
}
/*
* Initialize P2L checkpointing regions
*/
SPDK_STATIC_ASSERT(SPDK_COUNTOF(p2l_region_name) == FTL_LAYOUT_REGION_TYPE_P2L_COUNT,
"Incorrect # of P2L region names");
for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
region_type++) {
if (offset >= layout->nvc.total_blocks) {
goto error;
}
region = &layout->region[region_type];
region->type = region_type;
region->name = p2l_region_name[region_type - FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN];
region->current.version = FTL_P2L_VERSION_CURRENT;
region->prev.version = FTL_P2L_VERSION_CURRENT;
region->current.offset = offset;
region->current.blocks = blocks_region(layout->p2l.ckpt_pages * FTL_BLOCK_SIZE);
region->entry_size = 1;
region->num_entries = region->current.blocks;
set_region_bdev_nvc(region, dev);
offset += region->current.blocks;
}
/*
* Initialize NV Cache metadata
*/
@@ -372,6 +403,9 @@ ftl_layout_setup(struct spdk_ftl_dev *dev)
layout->l2p.addr_size = layout->l2p.addr_length > 32 ? 8 : 4;
layout->l2p.lbas_in_page = FTL_BLOCK_SIZE / layout->l2p.addr_size;
/* Setup P2L ckpt */
layout->p2l.ckpt_pages = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), dev->xfer_size);
if (setup_layout_nvc(dev)) {
return -EINVAL;
}
@@ -388,10 +422,9 @@ ftl_layout_setup(struct spdk_ftl_dev *dev)
blocks2mib(layout->base.total_blocks));
FTL_NOTICELOG(dev, "NV cache device capacity: %.2f MiB\n",
blocks2mib(layout->nvc.total_blocks));
FTL_NOTICELOG(dev, "L2P entries: %"PRIu64"\n",
dev->num_lbas);
FTL_NOTICELOG(dev, "L2P address size: %"PRIu64"\n",
layout->l2p.addr_size);
FTL_NOTICELOG(dev, "L2P entries: %"PRIu64"\n", dev->num_lbas);
FTL_NOTICELOG(dev, "L2P address size: %"PRIu64"\n", layout->l2p.addr_size);
FTL_NOTICELOG(dev, "P2L checkpoint pages: %"PRIu64"\n", layout->p2l.ckpt_pages);
return 0;
}
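
A worked example of the ckpt_pages sizing above, under an assumed band geometry (the real values come from the base bdev at startup):

#include <assert.h>
#include <stdint.h>

/* Local stand-in for spdk_divide_round_up() from spdk/util.h. */
static uint64_t
divide_round_up(uint64_t num, uint64_t divisor)
{
	return (num + divisor - 1) / divisor;
}

int
main(void)
{
	uint64_t num_blocks_in_band = 131072;	/* assumed: 512MiB band of 4KiB blocks */
	uint64_t xfer_size = 256;		/* pinned to FTL_NUM_LBA_IN_BLOCK */

	/* One checkpoint page per write unit in the band, tail rounded up. */
	uint64_t ckpt_pages = divide_round_up(num_blocks_in_band, xfer_size);

	assert(ckpt_pages == 512);	/* 512 pages * 4KiB = 2MiB per P2L region */
	return 0;
}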

lib/ftl/ftl_layout.h

@@ -11,6 +11,9 @@
struct spdk_ftl_dev;
struct ftl_md;
#define FTL_LAYOUT_REGION_TYPE_P2L_COUNT \
(FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX - FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN + 1)
enum ftl_layout_region_type {
#ifdef SPDK_FTL_VSS_EMU
/** VSS region for NV cache VSS emulation */
@@ -42,6 +45,18 @@ enum ftl_layout_region_type {
/* User data region on the base device */
FTL_LAYOUT_REGION_TYPE_DATA_BASE,
/* P2L checkpointing allows for emulation of VSS on base device.
* 4 entries are needed - 2 for each writer
* Although the naming may suggest a particular region is assigned to its corresponding writer, it's not
* the case - they can be used interchangeably
*/
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC,
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC,
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC_NEXT,
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_COMP,
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_COMP_NEXT,
FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_COMP_NEXT,
FTL_LAYOUT_REGION_TYPE_MAX,
};
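
The open-band limit added to get_band() later in this patch falls straight out of this enum. A compile-time restatement, using relative indices (in the real enum the members sit after the data regions, but they are contiguous, which is all the MIN/MAX aliases require):

#include <assert.h>

enum {
	P2L_CKPT_MIN	= 0,	/* FTL_LAYOUT_REGION_TYPE_P2L_CKPT_GC */
	P2L_CKPT_MAX	= 3,	/* FTL_LAYOUT_REGION_TYPE_P2L_CKPT_COMP_NEXT */
	P2L_CKPT_COUNT	= P2L_CKPT_MAX - P2L_CKPT_MIN + 1,
};

int
main(void)
{
	/* Four regions total: two writers (GC and compaction), each with a
	 * current band and a pre-opened next band. */
	assert(P2L_CKPT_COUNT == 4);
	/* Hence the writer-side cap of COUNT / 2 = 2 open bands apiece. */
	assert(P2L_CKPT_COUNT / 2 == 2);
	return 0;
}
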
@@ -130,6 +145,12 @@ struct ftl_layout {
uint64_t lbas_in_page;
} l2p;
/* Organization of P2L checkpoints */
struct {
/* Number of P2L checkpoint pages */
uint64_t ckpt_pages;
} p2l;
struct ftl_layout_region region[FTL_LAYOUT_REGION_TYPE_MAX];
/* Metadata object corresponding to the regions */

lib/ftl/ftl_p2l.c (new file, 546 lines)

@@ -0,0 +1,546 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/bdev_module.h"
#include "spdk/crc32.h"
#include "ftl_internal.h"
#include "ftl_band.h"
#include "ftl_core.h"
#include "ftl_layout.h"
#include "ftl_nv_cache_io.h"
#include "ftl_writer.h"
#include "mngt/ftl_mngt.h"
struct ftl_p2l_ckpt {
TAILQ_ENTRY(ftl_p2l_ckpt) link;
union ftl_md_vss *vss_md_page;
struct ftl_md *md;
struct ftl_layout_region *layout_region;
uint64_t num_pages;
#if defined(DEBUG)
uint64_t dbg_bmp_sz;
void *dbg_bmp;
struct ftl_bitmap *bmp;
#endif
};
static struct ftl_p2l_ckpt *
ftl_p2l_ckpt_new(struct spdk_ftl_dev *dev, int region_type)
{
struct ftl_p2l_ckpt *ckpt;
ckpt = calloc(1, sizeof(struct ftl_p2l_ckpt));
if (!ckpt) {
return NULL;
}
ckpt->vss_md_page = ftl_md_vss_buf_alloc(&dev->layout.region[region_type],
dev->layout.region[region_type].num_entries);
ckpt->layout_region = &dev->layout.region[region_type];
ckpt->md = dev->layout.md[region_type];
ckpt->num_pages = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), FTL_NUM_LBA_IN_BLOCK);
if (!ckpt->vss_md_page) {
free(ckpt);
return NULL;
}
#if defined(DEBUG)
/* The bitmap size must be a multiple of word size (8b) - round up */
ckpt->dbg_bmp_sz = spdk_divide_round_up(ckpt->num_pages, 8);
ckpt->dbg_bmp = calloc(1, ckpt->dbg_bmp_sz);
assert(ckpt->dbg_bmp);
ckpt->bmp = ftl_bitmap_create(ckpt->dbg_bmp, ckpt->dbg_bmp_sz);
assert(ckpt->bmp);
#endif
return ckpt;
}
static void
ftl_p2l_ckpt_destroy(struct ftl_p2l_ckpt *ckpt)
{
#if defined(DEBUG)
ftl_bitmap_destroy(ckpt->bmp);
free(ckpt->dbg_bmp);
#endif
spdk_dma_free(ckpt->vss_md_page);
free(ckpt);
}
int
ftl_p2l_ckpt_init(struct spdk_ftl_dev *dev)
{
int region_type;
struct ftl_p2l_ckpt *ckpt;
TAILQ_INIT(&dev->p2l_ckpt.free);
TAILQ_INIT(&dev->p2l_ckpt.inuse);
for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
region_type++) {
ckpt = ftl_p2l_ckpt_new(dev, region_type);
if (!ckpt) {
return -1;
}
TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
}
return 0;
}
void
ftl_p2l_ckpt_deinit(struct spdk_ftl_dev *dev)
{
struct ftl_p2l_ckpt *ckpt, *ckpt_next;
TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.free, link, ckpt_next) {
TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
ftl_p2l_ckpt_destroy(ckpt);
}
TAILQ_FOREACH_SAFE(ckpt, &dev->p2l_ckpt.inuse, link, ckpt_next) {
TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
ftl_p2l_ckpt_destroy(ckpt);
}
}
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire(struct spdk_ftl_dev *dev)
{
struct ftl_p2l_ckpt *ckpt;
ckpt = TAILQ_FIRST(&dev->p2l_ckpt.free);
assert(ckpt);
TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);
return ckpt;
}
void
ftl_p2l_ckpt_release(struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt)
{
assert(ckpt);
#if defined(DEBUG)
memset(ckpt->dbg_bmp, 0, ckpt->dbg_bmp_sz);
#endif
TAILQ_REMOVE(&dev->p2l_ckpt.inuse, ckpt, link);
TAILQ_INSERT_TAIL(&dev->p2l_ckpt.free, ckpt, link);
}
static void
ftl_p2l_ckpt_issue_end(int status, void *arg)
{
struct ftl_rq *rq = arg;
assert(rq);
if (status) {
/* retry */
ftl_md_persist_entry_retry(&rq->md_persist_entry_ctx);
return;
}
assert(rq->io.band->queue_depth > 0);
rq->io.band->queue_depth--;
rq->owner.cb(rq);
}
void
ftl_p2l_ckpt_issue(struct ftl_rq *rq)
{
struct ftl_rq_entry *iter = rq->entries;
ftl_addr addr = rq->io.addr;
struct ftl_p2l_ckpt *ckpt = NULL;
struct ftl_p2l_ckpt_page *map_page;
union ftl_md_vss *md_page;
struct ftl_band *band;
uint64_t band_offs, p2l_map_page_no, i;
assert(rq);
band = rq->io.band;
ckpt = band->p2l_map.p2l_ckpt;
assert(ckpt);
/* Derive the P2L map page no */
band_offs = ftl_band_block_offset_from_addr(band, rq->io.addr);
p2l_map_page_no = band_offs / FTL_NUM_LBA_IN_BLOCK;
assert((band_offs + rq->num_blocks - 1) / FTL_NUM_LBA_IN_BLOCK == p2l_map_page_no);
assert(p2l_map_page_no < ckpt->num_pages);
/* Get the corresponding P2L map page - the underlying stored data is the same as in the end metadata of band P2L (ftl_p2l_map_entry),
* however we're interested in a whole page (4KiB) worth of content
*/
map_page = ((struct ftl_p2l_ckpt_page *)band->p2l_map.band_map) + p2l_map_page_no;
assert(map_page);
/* Set up the md */
md_page = &ckpt->vss_md_page[p2l_map_page_no];
md_page->p2l_ckpt.seq_id = band->md->seq;
assert(rq->num_blocks == FTL_NUM_LBA_IN_BLOCK);
/* Update the band P2L map */
for (i = 0; i < rq->num_blocks; i++, iter++) {
if (iter->lba != FTL_LBA_INVALID) {
/* This is compaction or reloc */
assert(!ftl_addr_in_nvc(rq->dev, addr));
ftl_band_set_p2l(band, iter->lba, addr, iter->seq_id);
}
addr = ftl_band_next_addr(band, addr, 1);
}
#if defined(DEBUG)
ftl_bitmap_set(ckpt->bmp, p2l_map_page_no);
#endif
md_page->p2l_ckpt.p2l_checksum = spdk_crc32c_update(map_page,
rq->num_blocks * sizeof(struct ftl_p2l_map_entry), 0);
/* Save the P2L map entry */
ftl_md_persist_entry(ckpt->md, p2l_map_page_no, map_page, md_page, ftl_p2l_ckpt_issue_end,
rq, &rq->md_persist_entry_ctx);
}
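
/*
 * Editor's sketch, not part of the patch: the per-page CRC stamped above is
 * the same one verified by ftl_mngt_p2l_ckpt_restore() further down. The two
 * helpers below restate that contract, relying on this file's own includes;
 * 256 is FTL_NUM_LBA_IN_BLOCK written out (an assumption here).
 */
static uint32_t
p2l_page_checksum_sketch(const struct ftl_p2l_map_entry *map)
{
	return spdk_crc32c_update(map, 256 * sizeof(*map), 0);
}

/* Restore treats a zero checksum as "not stamped" and skips verification. */
static bool
p2l_page_ok_sketch(const struct ftl_p2l_map_entry *map, uint32_t stored)
{
	return stored == 0 || stored == p2l_page_checksum_sketch(map);
}
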
#if defined(DEBUG)
static void
ftl_p2l_validate_pages(struct ftl_band *band, struct ftl_p2l_ckpt *ckpt,
uint64_t page_begin, uint64_t page_end, bool val)
{
uint64_t page_no;
for (page_no = page_begin; page_no < page_end; page_no++) {
assert(ftl_bitmap_get(ckpt->bmp, page_no) == val);
}
}
void
ftl_p2l_validate_ckpt(struct ftl_band *band)
{
struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
uint64_t num_blks_tail_md = ftl_tail_md_num_blocks(band->dev);
uint64_t num_pages_tail_md = num_blks_tail_md / FTL_NUM_LBA_IN_BLOCK;
if (!ckpt) {
return;
}
assert(num_blks_tail_md % FTL_NUM_LBA_IN_BLOCK == 0);
/* all data pages written */
ftl_p2l_validate_pages(band, ckpt,
0, ckpt->num_pages - num_pages_tail_md, true);
/* tail md pages not written */
ftl_p2l_validate_pages(band, ckpt, ckpt->num_pages - num_pages_tail_md,
ckpt->num_pages, false);
}
#endif
static struct ftl_band *
ftl_get_band_from_region(struct spdk_ftl_dev *dev, enum ftl_layout_region_type type)
{
struct ftl_band *band = NULL;
uint64_t i;
assert(type >= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN);
assert(type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX);
for (i = 0; i < ftl_get_num_bands(dev); i++) {
band = &dev->bands[i];
if ((band->md->state == FTL_BAND_STATE_OPEN ||
band->md->state == FTL_BAND_STATE_FULL) &&
band->md->p2l_md_region == type) {
return band;
}
}
return NULL;
}
static void ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx);
static void
ftl_p2l_ckpt_persist_end(int status, void *arg)
{
struct ftl_mngt_process *mngt = arg;
struct ftl_p2l_sync_ctx *ctx;
assert(mngt);
if (status) {
ftl_mngt_fail_step(mngt);
return;
}
ctx = ftl_mngt_get_step_ctx(mngt);
ctx->page_start++;
if (ctx->page_start == ctx->page_end) {
ctx->md_region++;
ftl_mngt_continue_step(mngt);
} else {
ftl_mngt_persist_band_p2l(mngt, ctx);
}
}
static void
ftl_mngt_persist_band_p2l(struct ftl_mngt_process *mngt, struct ftl_p2l_sync_ctx *ctx)
{
struct ftl_band *band = ctx->band;
union ftl_md_vss *md_page;
struct ftl_p2l_ckpt_page *map_page;
struct ftl_p2l_ckpt *ckpt;
ckpt = band->p2l_map.p2l_ckpt;
map_page = ((struct ftl_p2l_ckpt_page *)band->p2l_map.band_map) + ctx->page_start;
md_page = &ckpt->vss_md_page[ctx->page_start];
md_page->p2l_ckpt.seq_id = band->md->seq;
md_page->p2l_ckpt.p2l_checksum = spdk_crc32c_update(map_page,
FTL_NUM_LBA_IN_BLOCK * sizeof(struct ftl_p2l_map_entry), 0);
/* Save the P2L map entry */
ftl_md_persist_entry(ckpt->md, ctx->page_start, map_page, md_page,
ftl_p2l_ckpt_persist_end, mngt, &band->md_persist_entry_ctx);
}
void
ftl_mngt_persist_bands_p2l(struct ftl_mngt_process *mngt)
{
struct ftl_p2l_sync_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
struct ftl_band *band;
uint64_t band_offs, p2l_map_page_no;
if (ctx->md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
ftl_mngt_next_step(mngt);
return;
}
band = ftl_get_band_from_region(ftl_mngt_get_dev(mngt), ctx->md_region);
/* No band has the md region assigned (shutdown happened before next_band was assigned) */
if (!band) {
ctx->page_start = 0;
ctx->page_end = 0;
ctx->md_region++;
ftl_mngt_continue_step(mngt);
return;
}
band_offs = ftl_band_block_offset_from_addr(band, band->md->iter.addr);
p2l_map_page_no = band_offs / FTL_NUM_LBA_IN_BLOCK;
ctx->page_start = 0;
ctx->page_end = p2l_map_page_no;
ctx->band = band;
/* Band wasn't written to - no need to sync its P2L */
if (ctx->page_end == 0) {
ctx->md_region++;
ftl_mngt_continue_step(mngt);
return;
}
ftl_mngt_persist_band_p2l(mngt, ctx);
}
int
ftl_mngt_p2l_ckpt_get_seq_id(struct spdk_ftl_dev *dev, int md_region)
{
struct ftl_layout *layout = &dev->layout;
struct ftl_md *md = layout->md[md_region];
union ftl_md_vss *page_md_buf = ftl_md_get_vss_buffer(md);
uint64_t page_no, seq_id = 0;
for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page_md_buf++) {
if (seq_id < page_md_buf->p2l_ckpt.seq_id) {
seq_id = page_md_buf->p2l_ckpt.seq_id;
}
}
return seq_id;
}
int
ftl_mngt_p2l_ckpt_restore(struct ftl_band *band, uint32_t md_region, uint64_t seq_id)
{
struct ftl_layout *layout = &band->dev->layout;
struct ftl_md *md = layout->md[md_region];
union ftl_md_vss *page_md_buf = ftl_md_get_vss_buffer(md);
struct ftl_p2l_ckpt_page *page = ftl_md_get_buffer(md);
struct ftl_p2l_ckpt_page *map_page;
uint64_t page_no, page_max = 0;
bool page_found = false;
assert(band->md->p2l_md_region == md_region);
if (band->md->p2l_md_region != md_region) {
return -EINVAL;
}
assert(band->md->seq == seq_id);
if (band->md->seq != seq_id) {
return -EINVAL;
}
for (page_no = 0; page_no < layout->p2l.ckpt_pages; page_no++, page++, page_md_buf++) {
if (page_md_buf->p2l_ckpt.seq_id != seq_id) {
continue;
}
page_max = page_no;
page_found = true;
/* Get the corresponding P2L map page - the underlying stored data is the same as in the end metadata of band P2L (ftl_p2l_map_entry),
* however we're interested in a whole page (4KiB) worth of content
*/
map_page = ((struct ftl_p2l_ckpt_page *)band->p2l_map.band_map) + page_no;
if (page_md_buf->p2l_ckpt.p2l_checksum &&
page_md_buf->p2l_ckpt.p2l_checksum != spdk_crc32c_update(page,
FTL_NUM_LBA_IN_BLOCK * sizeof(struct ftl_p2l_map_entry), 0)) {
return -EINVAL;
}
/* Restore the page from P2L checkpoint */
*map_page = *page;
}
assert(page_found);
if (!page_found) {
return -EINVAL;
}
/* Restore checkpoint in band P2L map */
band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(
band->dev, md_region);
#ifdef DEBUG
/* Set checkpoint valid map for validation */
struct ftl_p2l_ckpt *ckpt = band->p2l_map.p2l_ckpt;
for (uint64_t i = 0; i <= page_max; i++) {
ftl_bitmap_set(ckpt->bmp, i);
}
#endif
ftl_band_iter_init(band);
ftl_band_iter_set(band, (page_max + 1) * FTL_NUM_LBA_IN_BLOCK);
return 0;
}
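
/*
 * Editor's sketch, not part of the patch: how the restored write pointer
 * falls out of the highest checkpoint page carrying the band's seq_id.
 * FTL_NUM_LBA_IN_BLOCK == 256 is again an assumption here.
 */
static uint64_t
restored_write_pointer_sketch(uint64_t page_max)
{
	/* Everything up to and including page_max is known good, so the
	 * band iterator resumes at the first block of the next write unit,
	 * e.g. page_max = 41 -> (41 + 1) * 256 = block 10752. */
	return (page_max + 1) * 256;
}
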
enum ftl_layout_region_type
ftl_p2l_ckpt_region_type(const struct ftl_p2l_ckpt *ckpt) {
return ckpt->layout_region->type;
}
struct ftl_p2l_ckpt *
ftl_p2l_ckpt_acquire_region_type(struct spdk_ftl_dev *dev, uint32_t region_type)
{
struct ftl_p2l_ckpt *ckpt = NULL;
TAILQ_FOREACH(ckpt, &dev->p2l_ckpt.free, link) {
if (ckpt->layout_region->type == region_type) {
break;
}
}
assert(ckpt);
TAILQ_REMOVE(&dev->p2l_ckpt.free, ckpt, link);
TAILQ_INSERT_TAIL(&dev->p2l_ckpt.inuse, ckpt, link);
return ckpt;
}
int
ftl_mngt_p2l_ckpt_restore_clean(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
struct ftl_layout *layout = &dev->layout;
struct ftl_p2l_ckpt_page *page, *map_page;
enum ftl_layout_region_type md_region = band->md->p2l_md_region;
uint64_t page_no;
uint64_t num_written_pages;
union ftl_md_vss *page_md_buf;
if (md_region < FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN ||
md_region > FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
return -EINVAL;
}
assert(band->md->iter.offset % FTL_NUM_LBA_IN_BLOCK == 0);
num_written_pages = band->md->iter.offset / FTL_NUM_LBA_IN_BLOCK;
/* Associate band with md region before shutdown */
if (!band->p2l_map.p2l_ckpt) {
band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
}
/* Band was opened but no data was written */
if (band->md->iter.offset == 0) {
return 0;
}
page_no = 0;
/* Restore P2L map up to last written page */
page_md_buf = ftl_md_get_vss_buffer(layout->md[md_region]);
page = ftl_md_get_buffer(layout->md[md_region]);
for (; page_no < num_written_pages; page_no++, page++, page_md_buf++) {
if (page_md_buf->p2l_ckpt.seq_id != band->md->seq) {
assert(page_md_buf->p2l_ckpt.seq_id == band->md->seq);
}
/* Get the corresponding P2L map page */
map_page = ((struct ftl_p2l_ckpt_page *)band->p2l_map.band_map) + page_no;
/* Restore the page from P2L checkpoint */
*map_page = *page;
#if defined(DEBUG)
assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
#endif
}
assert(page_md_buf->p2l_ckpt.seq_id < band->md->seq);
return 0;
}
void
ftl_mngt_p2l_ckpt_restore_shm_clean(struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
enum ftl_layout_region_type md_region = band->md->p2l_md_region;
/* Associate band with md region before shutdown */
if (!band->p2l_map.p2l_ckpt) {
band->p2l_map.p2l_ckpt = ftl_p2l_ckpt_acquire_region_type(dev, md_region);
}
#if defined(DEBUG)
uint64_t page_no;
uint64_t num_written_pages;
assert(band->md->iter.offset % FTL_NUM_LBA_IN_BLOCK == 0);
num_written_pages = band->md->iter.offset / FTL_NUM_LBA_IN_BLOCK;
/* Band was opened but no data was written */
if (band->md->iter.offset == 0) {
return;
}
/* Set page number to first data page - skip head md */
page_no = 0;
for (; page_no < num_written_pages; page_no++) {
assert(ftl_bitmap_get(band->p2l_map.p2l_ckpt->bmp, page_no) == false);
ftl_bitmap_set(band->p2l_map.p2l_ckpt->bmp, page_no);
}
#endif
}

lib/ftl/ftl_sb_current.h

@@ -39,6 +39,9 @@ struct ftl_superblock {
uint32_t reserved2;
/* Last L2P checkpoint +1 (i.e. min_seq_id, 0:no ckpt) */
uint64_t ckpt_seq_id;
struct ftl_superblock_gc_info gc_info;
struct ftl_superblock_md_region md_layout_head;

lib/ftl/ftl_writer.c

@@ -100,6 +100,13 @@ get_band(struct ftl_writer *writer)
}
}
if (writer->num_bands >= FTL_LAYOUT_REGION_TYPE_P2L_COUNT / 2) {
/* Maximum number of open bands exceeded (we split this
* value between the compaction and GC writers)
*/
return NULL;
}
writer->band = ftl_band_get_next_free(writer->dev);
if (writer->band) {
writer->num_bands++;

lib/ftl/mngt/ftl_mngt_band.c

@@ -369,6 +369,7 @@ ftl_mngt_finalize_init_bands(struct spdk_ftl_dev *dev, struct ftl_mngt_process *
offset = band->md->iter.offset;
ftl_band_iter_init(band);
ftl_band_iter_set(band, offset);
ftl_mngt_p2l_ckpt_restore_shm_clean(band);
} else if (dev->sb->clean) {
band->md->df_p2l_map = FTL_DF_OBJ_ID_INVALID;
if (ftl_band_alloc_p2l_map(band)) {
@@ -379,6 +380,11 @@ ftl_mngt_finalize_init_bands(struct spdk_ftl_dev *dev, struct ftl_mngt_process *
offset = band->md->iter.offset;
ftl_band_iter_init(band);
ftl_band_iter_set(band, offset);
if (ftl_mngt_p2l_ckpt_restore_clean(band)) {
ftl_mngt_fail_step(mngt);
return;
}
}
}

lib/ftl/mngt/ftl_mngt_bdev.c

@@ -89,6 +89,10 @@ ftl_mngt_open_base_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
}
dev->xfer_size = ftl_get_write_unit_size(bdev);
if (dev->xfer_size != FTL_NUM_LBA_IN_BLOCK) {
FTL_ERRLOG(dev, "Unsupported xfer_size (%"PRIu64")\n", dev->xfer_size);
goto error;
}
/* TODO: validate size when base device VSS usage gets added */
dev->md_size = spdk_bdev_get_md_size(bdev);

lib/ftl/mngt/ftl_mngt_md.c

@@ -213,6 +213,22 @@ ftl_mngt_persist_vld_map_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_proc
persist(dev, mngt, FTL_LAYOUT_REGION_TYPE_VALID_MAP);
}
static void
ftl_mngt_persist_p2l_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
/* Sync runtime P2L to persist any invalidation that may have happened */
struct ftl_p2l_sync_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
/*
* ftl_mngt_persist_bands_p2l() increments md_region itself before continuing the step for the next region
*/
if (ctx->md_region <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN) {
ctx->md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
}
ftl_mngt_persist_bands_p2l(mngt);
}
void
ftl_mngt_persist_band_info_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
@@ -268,6 +284,11 @@ static const struct ftl_mngt_process_desc desc_persist = {
.name = "Persist valid map metadata",
.action = ftl_mngt_persist_vld_map_metadata,
},
{
.name = "Persist P2L metadata",
.action = ftl_mngt_persist_p2l_metadata,
.ctx_size = sizeof(struct ftl_p2l_sync_ctx),
},
{
.name = "persist band info metadata",
.action = ftl_mngt_persist_band_info_metadata,
@@ -329,6 +350,7 @@ ftl_mngt_init_default_sb(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
sb->uuid = dev->conf.uuid;
sb->clean = 0;
dev->sb_shm->shm_clean = false;
sb->ckpt_seq_id = 0;
/* Max 16 IO depth per band relocate */
sb->max_reloc_qdepth = 16;

lib/ftl/mngt/ftl_mngt_p2l.c (new file, 154 lines)

@@ -0,0 +1,154 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "ftl_mngt.h"
#include "ftl_mngt_steps.h"
#include "ftl_internal.h"
#include "ftl_core.h"
struct ftl_mngt_p2l_md_ctx {
struct ftl_mngt_process *mngt;
int md_region;
int status;
};
static void ftl_p2l_wipe_md_region(struct spdk_ftl_dev *dev, struct ftl_mngt_p2l_md_ctx *ctx);
void
ftl_mngt_p2l_init_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
if (!ftl_p2l_ckpt_init(dev)) {
ftl_mngt_next_step(mngt);
} else {
ftl_mngt_fail_step(mngt);
}
}
void
ftl_mngt_p2l_deinit_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
ftl_p2l_ckpt_deinit(dev);
ftl_mngt_next_step(mngt);
}
static void
ftl_p2l_wipe_md_region_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
struct ftl_mngt_p2l_md_ctx *ctx = md->owner.cb_ctx;
if (status) {
ftl_mngt_fail_step(ctx->mngt);
return;
}
if (ctx->md_region == FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX) {
ftl_mngt_next_step(ctx->mngt);
return;
}
ctx->md_region++;
ftl_p2l_wipe_md_region(dev, ctx);
}
static void
ftl_p2l_wipe_md_region(struct spdk_ftl_dev *dev, struct ftl_mngt_p2l_md_ctx *ctx)
{
struct ftl_layout *layout = &dev->layout;
struct ftl_md *md = layout->md[ctx->md_region];
assert(ctx->md_region >= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN);
assert(ctx->md_region <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX);
if (!md) {
ftl_mngt_fail_step(ctx->mngt);
return;
}
md->owner.cb_ctx = ctx;
md->cb = ftl_p2l_wipe_md_region_cb;
ftl_md_persist(md);
}
void
ftl_mngt_p2l_wipe(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
struct ftl_mngt_p2l_md_ctx *ctx;
if (ftl_mngt_alloc_step_ctx(mngt, sizeof(struct ftl_mngt_p2l_md_ctx))) {
ftl_mngt_fail_step(mngt);
return;
}
ctx = ftl_mngt_get_step_ctx(mngt);
ctx->mngt = mngt;
ctx->md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
ftl_p2l_wipe_md_region(dev, ctx);
}
void
ftl_mngt_p2l_free_bufs(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
struct ftl_md *md;
int region_type;
for (region_type = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
region_type <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX;
region_type++) {
md = dev->layout.md[region_type];
assert(md);
ftl_md_free_buf(md, ftl_md_destroy_region_flags(dev, dev->layout.region[region_type].type));
}
ftl_mngt_next_step(mngt);
}
static void
ftl_mngt_p2l_restore_ckpt_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
struct ftl_mngt_p2l_md_ctx *ctx = md->owner.cb_ctx;
assert(ctx);
if (status) {
ctx->status = status;
}
if (++ctx->md_region == FTL_LAYOUT_REGION_TYPE_P2L_COUNT) {
if (!ctx->status) {
ftl_mngt_next_step(ctx->mngt);
} else {
ftl_mngt_fail_step(ctx->mngt);
}
}
}
void
ftl_mngt_p2l_restore_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
struct ftl_layout *layout = &dev->layout;
struct ftl_md *md;
struct ftl_mngt_p2l_md_ctx *ctx;
int md_region;
if (ftl_fast_startup(dev)) {
FTL_NOTICELOG(dev, "SHM: skipping p2l ckpt restore\n");
ftl_mngt_next_step(mngt);
return;
}
if (ftl_mngt_alloc_step_ctx(mngt, sizeof(struct ftl_mngt_p2l_md_ctx))) {
ftl_mngt_fail_step(mngt);
return;
}
ctx = ftl_mngt_get_step_ctx(mngt);
ctx->mngt = mngt;
ctx->md_region = 0;
ctx->status = 0;
for (md_region = FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN;
md_region <= FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX; md_region++) {
md = layout->md[md_region];
assert(md);
md->owner.cb_ctx = ctx;
md->cb = ftl_mngt_p2l_restore_ckpt_cb;
ftl_md_restore(md);
}
}

lib/ftl/mngt/ftl_mngt_shutdown.c

@@ -47,6 +47,10 @@ static const struct ftl_mngt_process_desc desc_shutdown = {
.name = "Deinitialize L2P",
.action = ftl_mngt_deinit_l2p
},
{
.name = "Deinitialize P2L checkpointing",
.action = ftl_mngt_p2l_deinit_ckpt
},
{
.name = "Rollback FTL device",
.action = ftl_mngt_rollback_device
@@ -91,6 +95,10 @@ static const struct ftl_mngt_process_desc desc_fast_shutdown = {
.name = "Deinitialize L2P",
.action = ftl_mngt_deinit_l2p
},
{
.name = "Deinitialize P2L checkpointing",
.action = ftl_mngt_p2l_deinit_ckpt
},
{
.name = "Rollback FTL device",
.action = ftl_mngt_rollback_device

lib/ftl/mngt/ftl_mngt_startup.c

@@ -162,6 +162,19 @@ static const struct ftl_mngt_process_desc desc_first_start = {
.name = "Save initial chunk info metadata",
.action = ftl_mngt_persist_nv_cache_metadata,
},
{
.name = "Initialize P2L checkpointing",
.action = ftl_mngt_p2l_init_ckpt,
.cleanup = ftl_mngt_p2l_deinit_ckpt
},
{
.name = "Wipe P2L region",
.action = ftl_mngt_p2l_wipe,
},
{
.name = "Free P2L region bufs",
.action = ftl_mngt_p2l_free_bufs,
},
{
.name = "Set FTL dirty state",
.action = ftl_mngt_set_dirty,
@@ -203,6 +216,15 @@ static const struct ftl_mngt_process_desc desc_clean_start = {
.name = "Restore metadata",
.action = ftl_mngt_restore_md
},
{
.name = "Initialize P2L checkpointing",
.action = ftl_mngt_p2l_init_ckpt,
.cleanup = ftl_mngt_p2l_deinit_ckpt
},
{
.name = "Restore P2L checkpoints",
.action = ftl_mngt_p2l_restore_ckpt
},
{
.name = "Initialize L2P",
.action = ftl_mngt_init_l2p,
@@ -216,6 +238,10 @@ static const struct ftl_mngt_process_desc desc_clean_start = {
.name = "Finalize band initialization",
.action = ftl_mngt_finalize_init_bands,
},
{
.name = "Free P2L region bufs",
.action = ftl_mngt_p2l_free_bufs,
},
{
.name = "Start core poller",
.action = ftl_mngt_start_core_poller,

lib/ftl/mngt/ftl_mngt_steps.h

@@ -110,6 +110,16 @@ void ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mn
void ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_init_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_deinit_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_wipe(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_free_bufs(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_restore_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_self_test(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_persist_band_info_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

lib/ftl/utils/ftl_md.h

@@ -122,6 +122,11 @@ union ftl_md_vss {
uint64_t seq_id;
} unmap;
struct {
uint64_t seq_id;
uint32_t p2l_checksum;
} p2l_ckpt;
struct {
uint64_t lba;
uint64_t seq_id;

test/unit/lib/ftl/ftl_band.c/ftl_band_ut.c

@@ -14,6 +14,7 @@
#define TEST_BAND_IDX 42
#define TEST_LBA 0x68676564
#define TEST_SEQ 0xDEADBEEF
#define G_GEO_ZONE_SIZE 10000
#define G_GEO_OPTIMAL_OPEN_ZONES 1
@@ -29,6 +30,7 @@ static struct ftl_band *g_band;
#if defined(DEBUG)
DEFINE_STUB_V(ftl_band_validate_md, (struct ftl_band *band, ftl_band_validate_md_cb cb));
DEFINE_STUB_V(ftl_p2l_validate_ckpt, (struct ftl_band *band));
#endif
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
@@ -75,8 +77,10 @@ DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), f
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), true);
DEFINE_STUB(ftl_p2l_ckpt_acquire, struct ftl_p2l_ckpt *, (struct spdk_ftl_dev *dev), NULL);
DEFINE_STUB(ftl_mngt_unmap, int, (struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
spdk_ftl_fn cb, void *cb_cntx), 0);
DEFINE_STUB_V(ftl_p2l_ckpt_release, (struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt));
DEFINE_STUB_V(ftl_l2p_process, (struct spdk_ftl_dev *dev));
DEFINE_STUB_V(ftl_nv_cache_process, (struct spdk_ftl_dev *dev));
@@ -123,6 +127,8 @@ DEFINE_STUB(ftl_writer_is_halted, bool, (struct ftl_writer *writer), true);
DEFINE_STUB(ftl_mempool_claim_df, void *, (struct ftl_mempool *mpool, ftl_df_obj_id df_obj_id),
NULL);
DEFINE_STUB(ftl_bitmap_count_set, uint64_t, (struct ftl_bitmap *bitmap), 0);
DEFINE_STUB(ftl_p2l_ckpt_region_type, enum ftl_layout_region_type,
(const struct ftl_p2l_ckpt *ckpt), 0);
static void
adjust_bitmap(struct ftl_bitmap **bitmap, uint64_t *bit)
@@ -255,15 +261,19 @@ test_band_set_addr(void)
offset = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, addr);
ftl_band_set_p2l(g_band, TEST_LBA, addr, TEST_SEQ);
CU_ASSERT_EQUAL(p2l_map->num_valid, 1);
CU_ASSERT_EQUAL(p2l_map->band_map[offset].lba, TEST_LBA);
CU_ASSERT_EQUAL(p2l_map->band_map[offset].seq_id, TEST_SEQ);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset));
addr += g_geo.zone_size / 2;
offset = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
ftl_band_set_p2l(g_band, TEST_LBA + 1, addr, TEST_SEQ + 1);
CU_ASSERT_EQUAL(p2l_map->num_valid, 2);
CU_ASSERT_EQUAL(p2l_map->band_map[offset].lba, TEST_LBA + 1);
CU_ASSERT_EQUAL(p2l_map->band_map[offset].seq_id, TEST_SEQ + 1);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset));
addr -= g_geo.zone_size / 2;
offset = test_offset_from_addr(addr, g_band);
@@ -285,6 +295,7 @@ test_invalidate_addr(void)
offset[0] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, addr);
ftl_band_set_p2l(g_band, TEST_LBA, addr, TEST_SEQ);
CU_ASSERT_EQUAL(p2l_map->num_valid, 1);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset[0]));
ftl_invalidate_addr(g_band->dev, addr);
@@ -293,9 +304,11 @@
offset[0] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, addr);
ftl_band_set_p2l(g_band, TEST_LBA, addr, TEST_SEQ);
addr += g_geo.zone_size / 2;
offset[1] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
ftl_band_set_p2l(g_band, TEST_LBA + 1, addr, TEST_SEQ);
CU_ASSERT_EQUAL(p2l_map->num_valid, 2);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset[0]));
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset[1]));


@@ -69,6 +69,8 @@ DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), f
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_resume, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_l2p_unpin, (struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count));
DEFINE_STUB(ftl_p2l_ckpt_acquire, struct ftl_p2l_ckpt *, (struct spdk_ftl_dev *dev), NULL);
DEFINE_STUB_V(ftl_p2l_ckpt_release, (struct spdk_ftl_dev *dev, struct ftl_p2l_ckpt *ckpt));
DEFINE_STUB(ftl_l2p_get, ftl_addr, (struct spdk_ftl_dev *dev, uint64_t lba), 0);
DEFINE_STUB_V(ftl_mempool_put, (struct ftl_mempool *mpool, void *element));