ftl: sequence id tracking

Track the relative sequence of opening and closing bands and chunks.
Necessary for detecting the most recent user data during dirty shutdown recovery.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Change-Id: I682030e58284d7b090667e4e5a9f4bbc7615708a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13366
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Artur Paszkiewicz 2022-06-01 10:49:48 +02:00 committed by Jim Harris
parent 35a9ddb5b6
commit 36049672a3
21 changed files with 208 additions and 16 deletions

View File

@ -68,6 +68,7 @@ _ftl_band_set_free(struct ftl_band *band)
/* Add the band to the free band list */ /* Add the band to the free band list */
TAILQ_INSERT_TAIL(&dev->free_bands, band, queue_entry); TAILQ_INSERT_TAIL(&dev->free_bands, band, queue_entry);
band->md->close_seq_id = 0;
band->reloc = false; band->reloc = false;
dev->num_free++; dev->num_free++;
@ -192,7 +193,7 @@ ftl_band_set_addr(struct ftl_band *band, uint64_t lba, ftl_addr addr)
offset = ftl_band_block_offset_from_addr(band, addr); offset = ftl_band_block_offset_from_addr(band, addr);
p2l_map->band_map[offset] = lba; p2l_map->band_map[offset].lba = lba;
p2l_map->num_valid++; p2l_map->num_valid++;
ftl_bitmap_set(band->dev->valid_map, addr); ftl_bitmap_set(band->dev->valid_map, addr);
} }
@ -387,12 +388,17 @@ ftl_band_p2l_map_addr(struct ftl_band *band)
int int
ftl_band_write_prep(struct ftl_band *band) ftl_band_write_prep(struct ftl_band *band)
{ {
struct spdk_ftl_dev *dev = band->dev;
if (ftl_band_alloc_p2l_map(band)) { if (ftl_band_alloc_p2l_map(band)) {
return -1; return -1;
} }
ftl_band_iter_init(band); ftl_band_iter_init(band);
band->md->seq = ftl_get_next_seq_id(dev);
FTL_DEBUGLOG(dev, "Band to write, id %u seq %"PRIu64"\n", band->id, band->md->seq);
return 0; return 0;
} }

View File

@ -61,6 +61,12 @@ struct ftl_band_md {
/* Band type set during opening */ /* Band type set during opening */
enum ftl_band_type type; enum ftl_band_type type;
/* Sequence ID when band was opened */
uint64_t seq;
/* Sequence ID when band was closed */
uint64_t close_seq_id;
/* Number of times band was fully written (ie. number of free -> closed state cycles) */ /* Number of times band was fully written (ie. number of free -> closed state cycles) */
uint64_t wr_cnt; uint64_t wr_cnt;

View File

@ -341,6 +341,7 @@ ftl_band_close(struct ftl_band *band)
uint64_t num_blocks = ftl_tail_md_num_blocks(dev); uint64_t num_blocks = ftl_tail_md_num_blocks(dev);
/* Write P2L map first, after completion, set the state to close on nvcache, then internally */ /* Write P2L map first, after completion, set the state to close on nvcache, then internally */
band->md->close_seq_id = ftl_get_next_seq_id(dev);
ftl_band_set_state(band, FTL_BAND_STATE_CLOSING); ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks); ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band); ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);
@ -374,6 +375,7 @@ ftl_band_free(struct ftl_band *band)
memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE); memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE; p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
p2l_map->band_dma_md->close_seq_id = 0;
p2l_map->band_dma_md->p2l_map_checksum = 0; p2l_map->band_dma_md->p2l_map_checksum = 0;
ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL, ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,

View File

@ -151,7 +151,8 @@ ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
/* Invalidate open/full band p2l_map entry to keep p2l and l2p /* Invalidate open/full band p2l_map entry to keep p2l and l2p
* consistency when band is going to close state */ * consistency when band is going to close state */
if (FTL_BAND_STATE_OPEN == band->md->state || FTL_BAND_STATE_FULL == band->md->state) { if (FTL_BAND_STATE_OPEN == band->md->state || FTL_BAND_STATE_FULL == band->md->state) {
p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)] = FTL_LBA_INVALID; p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)].lba = FTL_LBA_INVALID;
p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)].seq_id = 0;
} }
} }

View File

@ -183,6 +183,8 @@ bool ftl_needs_reloc(struct spdk_ftl_dev *dev);
struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev); struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);
void ftl_recover_max_seq(struct spdk_ftl_dev *dev);
static inline uint64_t static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev) ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{ {
@ -250,11 +252,17 @@ ftl_addr_from_nvc_offset(const struct spdk_ftl_dev *dev, uint64_t cache_offset)
return cache_offset + dev->layout.base.total_blocks; return cache_offset + dev->layout.base.total_blocks;
} }
static inline uint64_t
ftl_get_next_seq_id(struct spdk_ftl_dev *dev)
{
	/* Advance the device-global sequence counter and hand out the new value.
	 * Sequence ids therefore start at 1; 0 is reserved as "never assigned". */
	uint64_t next = dev->sb->seq_id + 1;

	dev->sb->seq_id = next;
	return next;
}
static inline size_t static inline size_t
ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev) ftl_p2l_map_num_blocks(const struct spdk_ftl_dev *dev)
{ {
return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) *
FTL_BLOCK_SIZE); sizeof(struct ftl_p2l_map_entry), FTL_BLOCK_SIZE);
} }
static inline size_t static inline size_t

View File

@ -59,10 +59,10 @@ ftl_band_validate_md_pin(struct ftl_band_validate_ctx *ctx)
continue; continue;
} }
assert(p2l_map->band_map[i] != FTL_LBA_INVALID); assert(p2l_map->band_map[i].lba != FTL_LBA_INVALID);
ctx->remaining++; ctx->remaining++;
ctx->pin_cnt++; ctx->pin_cnt++;
ftl_l2p_pin(dev, p2l_map->band_map[i], 1, ftl_band_validate_md_l2p_pin_cb, ctx, ftl_l2p_pin(dev, p2l_map->band_map[i].lba, 1, ftl_band_validate_md_l2p_pin_cb, ctx,
&ctx->l2p_pin_ctx[i]); &ctx->l2p_pin_ctx[i]);
} }

View File

@ -52,8 +52,13 @@ enum ftl_md_status {
FTL_MD_INVALID_SIZE FTL_MD_INVALID_SIZE
}; };
struct ftl_p2l_map_entry {
uint64_t lba;
uint64_t seq_id;
};
/* Number of LBAs that could be stored in a single block */ /* Number of LBAs that could be stored in a single block */
#define FTL_NUM_LBA_IN_BLOCK (FTL_BLOCK_SIZE / sizeof(uint64_t)) #define FTL_NUM_LBA_IN_BLOCK (FTL_BLOCK_SIZE / sizeof(struct ftl_p2l_map_entry))
/* /*
* Mapping of physical (actual location on disk) to logical (user's POV) addresses. Used in two main scenarios: * Mapping of physical (actual location on disk) to logical (user's POV) addresses. Used in two main scenarios:
@ -75,8 +80,8 @@ struct ftl_p2l_map {
/* P2L map (only valid for open/relocating bands) */ /* P2L map (only valid for open/relocating bands) */
union { union {
uint64_t *band_map; struct ftl_p2l_map_entry *band_map;
void *chunk_map; void *chunk_map;
}; };
/* DMA buffer for region's metadata entry */ /* DMA buffer for region's metadata entry */

View File

@ -144,6 +144,9 @@ struct ftl_rq_entry {
/* Logical block address */ /* Logical block address */
uint64_t lba; uint64_t lba;
/* Sequence id of original chunk where this user data was written to */
uint64_t seq_id;
/* Index of this entry within FTL request */ /* Index of this entry within FTL request */
const uint64_t index; const uint64_t index;

View File

@ -155,6 +155,10 @@ ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr,
if (new_addr < current_addr) { if (new_addr < current_addr) {
return; return;
} }
} else {
if (new_chunk->md->seq_id < current_chunk->md->seq_id) {
return;
}
} }
} }

View File

@ -307,12 +307,14 @@ ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
void void
ftl_nv_cache_fill_md(struct ftl_io *io) ftl_nv_cache_fill_md(struct ftl_io *io)
{ {
struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
uint64_t i; uint64_t i;
union ftl_md_vss *metadata = io->md; union ftl_md_vss *metadata = io->md;
uint64_t lba = ftl_io_get_lba(io, 0); uint64_t lba = ftl_io_get_lba(io, 0);
for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) { for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
metadata->nv_cache.lba = lba; metadata->nv_cache.lba = lba;
metadata->nv_cache.seq_id = chunk->md->seq_id;
} }
} }
@ -433,6 +435,7 @@ chunk_free_cb(int status, void *ctx)
nv_cache->chunk_free_count++; nv_cache->chunk_free_count++;
nv_cache->chunk_full_count--; nv_cache->chunk_full_count--;
chunk->md->state = FTL_CHUNK_STATE_FREE; chunk->md->state = FTL_CHUNK_STATE_FREE;
chunk->md->close_seq_id = 0;
ftl_chunk_free_chunk_free_entry(chunk); ftl_chunk_free_chunk_free_entry(chunk);
} else { } else {
ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx); ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
@ -460,6 +463,7 @@ ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE); memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE; p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
p2l_map->chunk_dma_md->close_seq_id = 0;
p2l_map->chunk_dma_md->p2l_map_checksum = 0; p2l_map->chunk_dma_md->p2l_map_checksum = 0;
ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL, ftl_md_persist_entry(md, get_chunk_idx(chunk), p2l_map->chunk_dma_md, NULL,
@ -539,6 +543,7 @@ static void
compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp) compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
{ {
union ftl_md_vss *md; union ftl_md_vss *md;
struct ftl_nv_cache_chunk *chunk = comp->rd->owner.priv;
struct spdk_ftl_dev *dev = comp->rd->dev; struct spdk_ftl_dev *dev = comp->rd->dev;
uint64_t i; uint64_t i;
uint32_t count = comp->rd->iter.count; uint32_t count = comp->rd->iter.count;
@ -553,7 +558,7 @@ compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
entry = &comp->rd->entries[i]; entry = &comp->rd->entries[i];
pin_ctx = &entry->l2p_pin_ctx; pin_ctx = &entry->l2p_pin_ctx;
md = entry->io_md; md = entry->io_md;
if (md->nv_cache.lba == FTL_LBA_INVALID) { if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx); ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
} else { } else {
ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx); ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
@ -697,6 +702,7 @@ compaction_process_pad(struct ftl_nv_cache_compactor *compactor)
iter->addr = FTL_ADDR_INVALID; iter->addr = FTL_ADDR_INVALID;
iter->owner.priv = NULL; iter->owner.priv = NULL;
iter->lba = FTL_LBA_INVALID; iter->lba = FTL_LBA_INVALID;
iter->seq_id = 0;
iter++; iter++;
wr->iter.idx++; wr->iter.idx++;
} }
@ -858,7 +864,7 @@ compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
while (wr->iter.idx < num_entries && rd->iter.idx < rd->iter.count) { while (wr->iter.idx < num_entries && rd->iter.idx < rd->iter.count) {
/* Get metadata */ /* Get metadata */
md = rd->entries[rd->iter.idx].io_md; md = rd->entries[rd->iter.idx].io_md;
if (md->nv_cache.lba == FTL_LBA_INVALID) { if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
cache_addr++; cache_addr++;
rd->iter.idx++; rd->iter.idx++;
chunk_compaction_advance(chunk, 1); chunk_compaction_advance(chunk, 1);
@ -878,6 +884,7 @@ compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
iter->addr = current_addr; iter->addr = current_addr;
iter->owner.priv = chunk; iter->owner.priv = chunk;
iter->lba = md->nv_cache.lba; iter->lba = md->nv_cache.lba;
iter->seq_id = chunk->md->seq_id;
/* Advance within batch */ /* Advance within batch */
iter++; iter++;
@ -1214,6 +1221,7 @@ ftl_nv_cache_process(struct spdk_ftl_dev *dev)
TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry); TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry); TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
nv_cache->chunk_free_count--; nv_cache->chunk_free_count--;
chunk->md->seq_id = ftl_get_next_seq_id(dev);
ftl_chunk_open(chunk); ftl_chunk_open(chunk);
} }
@ -1316,6 +1324,52 @@ ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
return status; return status;
} }
/* qsort comparator: orders chunks by ascending open seq_id. */
static int
sort_chunks_cmp(const void *a, const void *b)
{
	struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
	struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;

	/* Compare explicitly instead of returning the difference: seq_id is
	 * uint64_t, so subtracting and truncating the result to int can wrap
	 * and produce the wrong sign for large seq_id deltas, breaking the
	 * qsort ordering contract. */
	if (a_chunk->md->seq_id < b_chunk->md->seq_id) {
		return -1;
	} else if (a_chunk->md->seq_id > b_chunk->md->seq_id) {
		return 1;
	} else {
		return 0;
	}
}
/* Rebuild the full-chunk list in ascending open seq_id order.
 * Returns 0 on success, -ENOMEM if the temporary array cannot be allocated. */
static int
sort_chunks(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk **sorted;
	struct ftl_nv_cache_chunk *chunk;
	uint32_t idx = 0;

	/* Nothing to order */
	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
		return 0;
	}

	sorted = calloc(nv_cache->chunk_full_count, sizeof(sorted[0]));
	if (sorted == NULL) {
		return -ENOMEM;
	}

	/* Snapshot the list into a flat array for qsort */
	TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
		sorted[idx] = chunk;
		idx++;
	}
	assert(idx == nv_cache->chunk_full_count);

	qsort(sorted, nv_cache->chunk_full_count, sizeof(sorted[0]),
	      sort_chunks_cmp);

	/* Re-insert in sorted order */
	TAILQ_INIT(&nv_cache->chunk_full_list);
	for (idx = 0; idx < nv_cache->chunk_full_count; idx++) {
		TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, sorted[idx], entry);
	}

	free(sorted);
	return 0;
}
static int static int
chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk) chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
{ {
@ -1398,6 +1452,11 @@ ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
goto error; goto error;
} }
status = sort_chunks(nv_cache);
if (status) {
FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
}
FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n", FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
nv_cache->chunk_full_count, nv_cache->chunk_free_count); nv_cache->chunk_full_count, nv_cache->chunk_free_count);
@ -1411,6 +1470,26 @@ error:
return status; return status;
} }
/* Scan every chunk's metadata and report the highest open and close
 * sequence ids seen across the whole NV cache. */
void
ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
			    uint64_t *close_seq_id)
{
	struct ftl_nv_cache_chunk *chunk = nv_cache->chunks;
	uint64_t max_open = 0, max_close = 0;
	uint64_t idx;

	assert(chunk);

	for (idx = 0; idx < nv_cache->chunk_count; idx++, chunk++) {
		if (chunk->md->seq_id > max_open) {
			max_open = chunk->md->seq_id;
		}
		if (chunk->md->close_seq_id > max_close) {
			max_close = chunk->md->close_seq_id;
		}
	}

	*open_seq_id = max_open;
	*close_seq_id = max_close;
}
typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status); typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
static void static void
@ -1530,6 +1609,8 @@ chunk_close_cb(int status, void *ctx)
TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry); TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
chunk->nv_cache->chunk_full_count++; chunk->nv_cache->chunk_full_count++;
chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
chunk->md->state = FTL_CHUNK_STATE_CLOSED; chunk->md->state = FTL_CHUNK_STATE_CLOSED;
} else { } else {
ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx); ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
@ -1569,6 +1650,7 @@ ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
struct ftl_basic_rq *brq = &chunk->metadata_rq; struct ftl_basic_rq *brq = &chunk->metadata_rq;
void *metadata = chunk->p2l_map.chunk_map; void *metadata = chunk->p2l_map.chunk_map;
chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks); ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk); ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);

View File

@ -39,6 +39,12 @@ enum ftl_chunk_state {
}; };
struct ftl_nv_cache_chunk_md { struct ftl_nv_cache_chunk_md {
/* Sequence id of writing */
uint64_t seq_id;
/* Sequence ID when chunk was closed */
uint64_t close_seq_id;
/* Current lba to write */ /* Current lba to write */
uint32_t write_pointer; uint32_t write_pointer;
@ -161,6 +167,8 @@ struct ftl_nv_cache {
uint64_t chunk_compaction_threshold; uint64_t chunk_compaction_threshold;
struct ftl_nv_cache_chunk *chunks; struct ftl_nv_cache_chunk *chunks;
uint64_t last_seq_id;
}; };
int ftl_nv_cache_init(struct spdk_ftl_dev *dev); int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
@ -197,6 +205,15 @@ bool ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache);
size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache); size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache);
uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache); uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);
/**
* @brief Iterates over NV caches chunks and returns the max open and closed sequence id
*
* @param nv_cache FLT NV cache
* @param[out] open_seq_id Max detected open sequence id
* @param[out] close_seq_id Max detected close sequence id
*/
void ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
uint64_t *close_seq_id);
typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx); typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

View File

@ -294,9 +294,10 @@ move_advance_rq(struct ftl_rq *rq)
assert(offset < ftl_get_num_blocks_in_band(band->dev)); assert(offset < ftl_get_num_blocks_in_band(band->dev));
assert(ftl_band_block_offset_valid(band, offset)); assert(ftl_band_block_offset_valid(band, offset));
entry->lba = band->p2l_map.band_map[offset]; entry->lba = band->p2l_map.band_map[offset].lba;
entry->addr = rq->io.addr; entry->addr = rq->io.addr;
entry->owner.priv = band; entry->owner.priv = band;
entry->seq_id = band->p2l_map.band_map[offset].seq_id;
entry++; entry++;
rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1); rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
@ -323,6 +324,7 @@ move_init_entries(struct ftl_rq *rq, uint64_t idx, uint64_t count)
iter->addr = FTL_ADDR_INVALID; iter->addr = FTL_ADDR_INVALID;
iter->owner.priv = NULL; iter->owner.priv = NULL;
iter->lba = FTL_LBA_INVALID; iter->lba = FTL_LBA_INVALID;
iter->seq_id = 0;
iter++; iter++;
i++; i++;
} }
@ -360,6 +362,7 @@ move_rq_pad(struct ftl_rq *rq, struct ftl_band *band)
entry->addr = rq->io.addr; entry->addr = rq->io.addr;
entry->owner.priv = band; entry->owner.priv = band;
entry->lba = FTL_LBA_INVALID; entry->lba = FTL_LBA_INVALID;
entry->seq_id = 0;
entry++; entry++;
rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1); rq->io.addr = ftl_band_next_addr(band, rq->io.addr, 1);
band->owner.cnt++; band->owner.cnt++;

View File

@ -62,6 +62,7 @@ ftl_rq_new(struct spdk_ftl_dev *dev, uint32_t io_md_size)
entry->addr = FTL_ADDR_INVALID; entry->addr = FTL_ADDR_INVALID;
entry->lba = FTL_LBA_INVALID; entry->lba = FTL_LBA_INVALID;
entry->io_payload = io_payload; entry->io_payload = io_payload;
entry->seq_id = 0;
if (io_md_size) { if (io_md_size) {
entry->io_md = io_md; entry->io_md = io_md;

View File

@ -59,6 +59,7 @@ struct ftl_superblock_shm {
bool in_progress; bool in_progress;
uint64_t start_lba; uint64_t start_lba;
uint64_t num_blocks; uint64_t num_blocks;
uint64_t seq_id;
} trim; } trim;
struct ftl_superblock_gc_info gc_info; struct ftl_superblock_gc_info gc_info;

View File

@ -17,6 +17,9 @@ struct ftl_superblock {
struct spdk_uuid uuid; struct spdk_uuid uuid;
/* Current sequence number */
uint64_t seq_id;
/* Flag describing clean shutdown */ /* Flag describing clean shutdown */
uint64_t clean; uint64_t clean;
@ -29,9 +32,11 @@ struct ftl_superblock {
/* Maximum IO depth per band relocate */ /* Maximum IO depth per band relocate */
uint64_t max_reloc_qdepth; uint64_t max_reloc_qdepth;
/* Reserved field */ /* Reserved fields */
uint64_t reserved; uint64_t reserved;
bool reserved3;
uint32_t reserved2; uint32_t reserved2;
struct ftl_superblock_gc_info gc_info; struct ftl_superblock_gc_info gc_info;

View File

@ -47,6 +47,7 @@ ftl_writer_band_state_change(struct ftl_band *band)
assert(writer->num_bands > 0); assert(writer->num_bands > 0);
writer->num_bands--; writer->num_bands--;
ftl_band_clear_owner(band, ftl_writer_band_state_change, writer); ftl_band_clear_owner(band, ftl_writer_band_state_change, writer);
writer->last_seq_id = band->md->close_seq_id;
break; break;
default: default:

View File

@ -36,6 +36,8 @@ struct ftl_writer {
/* Which type of band the writer uses */ /* Which type of band the writer uses */
enum ftl_band_type writer_type; enum ftl_band_type writer_type;
uint64_t last_seq_id;
}; };
bool ftl_writer_is_halted(struct ftl_writer *writer); bool ftl_writer_is_halted(struct ftl_writer *writer);

View File

@ -199,6 +199,44 @@ ftl_mngt_decorate_bands(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
ftl_mngt_next_step(mngt); ftl_mngt_next_step(mngt);
} }
/* Recover the device-global sequence counter after startup: take the maximum
 * of every band's and chunk's open/close sequence ids, and seed the per-writer
 * and NV-cache "last closed" trackers from the same scan. */
void
ftl_recover_max_seq(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;
	/* Sequence ids are uint64_t in the metadata; use uint64_t here rather
	 * than size_t, which is only 32 bits on ILP32 targets (truncation) and
	 * is a distinct type from uint64_t when passed by pointer below. */
	uint64_t band_close_seq_id = 0, band_open_seq_id = 0;
	uint64_t chunk_close_seq_id = 0, chunk_open_seq_id = 0;
	uint64_t max = 0;

	/* Max open/close seq ids over all shut bands */
	TAILQ_FOREACH(band, &dev->shut_bands, queue_entry) {
		band_open_seq_id = spdk_max(band_open_seq_id, band->md->seq);
		band_close_seq_id = spdk_max(band_close_seq_id, band->md->close_seq_id);
	}

	ftl_nv_cache_get_max_seq_id(&dev->nv_cache, &chunk_open_seq_id, &chunk_close_seq_id);

	dev->nv_cache.last_seq_id = chunk_close_seq_id;
	dev->writer_gc.last_seq_id = band_close_seq_id;
	dev->writer_user.last_seq_id = band_close_seq_id;

	max = spdk_max(max, band_open_seq_id);
	max = spdk_max(max, band_close_seq_id);
	max = spdk_max(max, chunk_open_seq_id);
	max = spdk_max(max, chunk_close_seq_id);

	/* Next ftl_get_next_seq_id() continues from the recovered maximum */
	dev->sb->seq_id = max;
}
/* qsort comparator: orders bands by ascending open sequence id. */
static int
_band_cmp(const void *_a, const void *_b)
{
	struct ftl_band *a, *b;

	a = *((struct ftl_band **)_a);
	b = *((struct ftl_band **)_b);

	/* seq is uint64_t; returning the raw difference truncated to int can
	 * wrap and give the wrong sign for large deltas, so compare explicitly
	 * to honor the qsort comparator contract. */
	if (a->md->seq < b->md->seq) {
		return -1;
	}
	if (a->md->seq > b->md->seq) {
		return 1;
	}
	return 0;
}
static struct ftl_band * static struct ftl_band *
next_high_prio_band(struct spdk_ftl_dev *dev) next_high_prio_band(struct spdk_ftl_dev *dev)
{ {
@ -267,6 +305,8 @@ ftl_mngt_finalize_init_bands(struct spdk_ftl_dev *dev, struct ftl_mngt_process *
uint64_t offset; uint64_t offset;
bool fast_startup = ftl_fast_startup(dev); bool fast_startup = ftl_fast_startup(dev);
ftl_recover_max_seq(dev);
TAILQ_FOREACH_SAFE(band, &dev->free_bands, queue_entry, temp_band) { TAILQ_FOREACH_SAFE(band, &dev->free_bands, queue_entry, temp_band) {
band->md->df_p2l_map = FTL_DF_OBJ_ID_INVALID; band->md->df_p2l_map = FTL_DF_OBJ_ID_INVALID;
} }
@ -293,6 +333,8 @@ ftl_mngt_finalize_init_bands(struct spdk_ftl_dev *dev, struct ftl_mngt_process *
} }
/* Assign open bands to writers and alloc necessary resources */ /* Assign open bands to writers and alloc necessary resources */
qsort(open_bands, num_open, sizeof(open_bands[0]), _band_cmp);
for (i = 0; i < num_open; ++i) { for (i = 0; i < num_open; ++i) {
band = open_bands[i]; band = open_bands[i];

View File

@ -162,7 +162,8 @@ ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB); FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);
/* Need to scrub user data, so in case of dirty shutdown the recovery won't /* Need to scrub user data, so in case of dirty shutdown the recovery won't
* pull in data during open chunks recovery from any previous instance * pull in data during open chunks recovery from any previous instance (since during short
* tests it's very likely that chunks seq_id will be in line between new head md and old VSS)
*/ */
md->cb = user_clear_cb; md->cb = user_clear_cb;
md->owner.cb_ctx = mngt; md->owner.cb_ctx = mngt;

View File

@ -119,10 +119,12 @@ union ftl_md_vss {
struct { struct {
uint64_t start_lba; uint64_t start_lba;
uint64_t num_blocks; uint64_t num_blocks;
uint64_t seq_id;
} unmap; } unmap;
struct { struct {
uint64_t lba; uint64_t lba;
uint64_t seq_id;
} nv_cache; } nv_cache;
}; };

View File

@ -256,14 +256,14 @@ test_band_set_addr(void)
ftl_band_set_addr(g_band, TEST_LBA, addr); ftl_band_set_addr(g_band, TEST_LBA, addr);
CU_ASSERT_EQUAL(p2l_map->num_valid, 1); CU_ASSERT_EQUAL(p2l_map->num_valid, 1);
CU_ASSERT_EQUAL(p2l_map->band_map[offset], TEST_LBA); CU_ASSERT_EQUAL(p2l_map->band_map[offset].lba, TEST_LBA);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset)); CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset));
addr += g_geo.zone_size / 2; addr += g_geo.zone_size / 2;
offset = test_offset_from_addr(addr, g_band); offset = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, addr); ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
CU_ASSERT_EQUAL(p2l_map->num_valid, 2); CU_ASSERT_EQUAL(p2l_map->num_valid, 2);
CU_ASSERT_EQUAL(p2l_map->band_map[offset], TEST_LBA + 1); CU_ASSERT_EQUAL(p2l_map->band_map[offset].lba, TEST_LBA + 1);
CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset)); CU_ASSERT_TRUE(ftl_bitmap_get(p2l_map->valid, offset));
addr -= g_geo.zone_size / 2; addr -= g_geo.zone_size / 2;
offset = test_offset_from_addr(addr, g_band); offset = test_offset_from_addr(addr, g_band);