FTL: L2P chunk recovery

Recover the L2P table from the P2L maps stored in each chunk's tail metadata. During recovery, the tail metadata of closed chunks is read back and replayed into the L2P, with sequence IDs deciding which copy of an LBA is the most recent.

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I039cfc54374fad0ba584d6029b752ca2f31925cf
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13374
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Kozlowski Mateusz 2022-05-30 10:50:53 +02:00 committed by Jim Harris
parent d1462266ce
commit 5c5587d805
3 changed files with 244 additions and 0 deletions


@@ -1546,6 +1546,37 @@ ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *
	dev->io_activity_total += brq->num_blocks;
}
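
/* Completion callback for ftl_chunk_basic_rq_read(); record the I/O status and
 * hand the request back to its owner.
 */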
static void
read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_basic_rq *brq = arg;

	brq->success = success;
	brq->owner.cb(brq);
	spdk_bdev_free_io(bdev_io);
}
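
/* Read brq->num_blocks blocks from the NV cache bdev into the request's
 * payload buffer; the completion is reported through read_brq_end().
 */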
static int
ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	int rc;

	brq->io.chunk = chunk;
	brq->success = false;

	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
			brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
	if (spdk_likely(!rc)) {
		dev->io_activity_total += brq->num_blocks;
	}

	return rc;
}

static void
chunk_open_cb(int status, void *ctx)
{
@@ -1660,6 +1691,156 @@ ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
	ftl_chunk_basic_rq_write(chunk, brq);
}
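
/* Read a chunk's tail metadata (the on-disk P2L map) into the chunk's P2L map
 * buffer and invoke cb once the read completes.
 */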
static int
ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	void *metadata;
	int rc;

	metadata = chunk->p2l_map.chunk_map;
	ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(brq, cb, cb_ctx);

	brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
	rc = ftl_chunk_basic_rq_read(chunk, brq);

	return rc;
}

struct restore_chunk_md_ctx {
	ftl_chunk_md_cb cb;
	void *cb_ctx;
	int status;
	uint64_t qd;
	uint64_t id;
};
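
/* Sanity check that every chunk is accounted for in one of the
 * open/free/full/compaction state counters.
 */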
static inline bool
is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
{
	uint64_t chunk_count = 0;

	chunk_count += nv_cache->chunk_open_count;
	chunk_count += nv_cache->chunk_free_count;
	chunk_count += nv_cache->chunk_full_count;
	chunk_count += nv_cache->chunk_comp_count;

	return chunk_count == nv_cache->chunk_count;
}
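
/* Completion of a single chunk's tail MD read: hand the P2L map to the user
 * callback, record any error, release the P2L map and resume the step.
 */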
static void
walk_tail_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_mngt_process *mngt = brq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
	int rc = 0;

	if (brq->success) {
		rc = ctx->cb(chunk, ctx->cb_ctx);
	} else {
		rc = -EIO;
	}

	if (rc) {
		ctx->status = rc;
	}

	ctx->qd--;
	chunk_free_p2l_map(chunk);
	ftl_mngt_continue_step(mngt);
}
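
/*
 * Iterate over all chunks that take part in recovery, reading their tail
 * metadata in parallel (bounded by the number of free P2L map buffers) and
 * invoking cb for each successfully read P2L map.
 */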
static void
ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
			       uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
{
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct restore_chunk_md_ctx *ctx;

	ctx = ftl_mngt_get_step_ctx(mngt);
	if (!ctx) {
		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
			ftl_mngt_fail_step(mngt);
			return;
		}
		ctx = ftl_mngt_get_step_ctx(mngt);
		assert(ctx);

		ctx->cb = cb;
		ctx->cb_ctx = cb_ctx;
	}

	/*
	 * This function generates a high queue depth and relies on
	 * ftl_mngt_continue_step() during completions to make sure all chunks
	 * are processed before returning an error (if any was found) or moving
	 * on to the next step.
	 */
	if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
			assert(false);
			ctx->status = -EINVAL;
		}

		if (ctx->status) {
			ftl_mngt_fail_step(mngt);
		} else {
			ftl_mngt_next_step(mngt);
		}
		return;
	}

	while (ctx->id < nvc->chunk_count) {
		struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
		int rc;

		if (!chunk->recovery) {
			/* This chunk is empty and not used in recovery */
			ctx->id++;
			continue;
		}

		if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
			ctx->id++;
			continue;
		}

		if (chunk_alloc_p2l_map(chunk)) {
			/* No more free P2L maps, break and continue later */
			break;
		}
		ctx->id++;

		rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
		if (0 == rc) {
			ctx->qd++;
		} else {
			chunk_free_p2l_map(chunk);
			ctx->status = rc;
		}
	}

	if (0 == ctx->qd) {
		/*
		 * A zero queue depth can happen when all leftover chunks are in
		 * the free state, or when ftl_chunk_read_tail_md() fails
		 * starting with the first I/O in a given batch. To streamline
		 * error handling (many chunks read their P2L maps in
		 * parallel), use ftl_mngt_continue_step() to get back to the
		 * end-of-step check at the top of this function.
		 */
		ftl_mngt_continue_step(mngt);
	}
}
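
/* Entry point for chunk-based L2P recovery: walk the tail metadata of chunks
 * closed after the superblock's checkpoint sequence ID and feed each recovered
 * P2L map to cb.
 */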
void
ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
			      ftl_chunk_md_cb cb, void *cb_ctx)
{
	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
}

static void
restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{


@@ -219,6 +219,9 @@ void ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_

typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

void ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
				   ftl_chunk_md_cb cb, void *cb_ctx);

struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
		ftl_addr addr);


@@ -472,6 +472,62 @@ ftl_mngt_recovery_iteration_restore_band_l2p(struct spdk_ftl_dev *dev,
	ftl_mngt_recovery_walk_band_tail_md(dev, mngt, restore_band_l2p_cb);
}
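
/* Replay a single chunk's P2L map into the recovery iteration's L2P snippet:
 * verify the map's CRC, then store an address for every in-range LBA, letting
 * the highest sequence ID win when multiple chunks map the same LBA.
 */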
static int
restore_chunk_l2p_cb(struct ftl_nv_cache_chunk *chunk, void *ctx)
{
	struct ftl_mngt_recovery_ctx *pctx = ctx;
	struct spdk_ftl_dev *dev;
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	ftl_addr addr;
	const uint64_t seq_id = chunk->md->seq_id;
	uint64_t i, lba;
	uint32_t chunk_map_crc;

	dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);

	chunk_map_crc = spdk_crc32c_update(chunk->p2l_map.chunk_map,
					   ftl_nv_cache_chunk_tail_md_num_blocks(chunk->nv_cache) * FTL_BLOCK_SIZE, 0);
	if (chunk->md->p2l_map_checksum != chunk_map_crc) {
		return -1;
	}

	for (i = 0; i < nv_cache->chunk_blocks; ++i) {
		uint64_t lba_off;

		lba = ftl_chunk_map_get_lba(chunk, i);

		if (lba == FTL_LBA_INVALID) {
			continue;
		}
		if (lba >= dev->num_lbas) {
			FTL_ERRLOG(dev, "L2P Chunk restore ERROR, LBA out of range\n");
			return -1;
		}
		if (lba < pctx->iter.lba_first || lba >= pctx->iter.lba_last) {
			continue;
		}

		lba_off = lba - pctx->iter.lba_first;
		if (seq_id < pctx->l2p_snippet.seq_id[lba_off]) {
			/* Newer data already recovered */
			continue;
		}

		addr = ftl_addr_from_nvc_offset(dev, chunk->offset + i);
		ftl_addr_store(dev, pctx->l2p_snippet.l2p, lba_off, addr);
		pctx->l2p_snippet.seq_id[lba_off] = seq_id;
	}

	return 0;
}
static void
ftl_mngt_recovery_iteration_restore_chunk_l2p(struct spdk_ftl_dev *dev,
		struct ftl_mngt_process *mngt)
{
	ftl_mngt_nv_cache_restore_l2p(dev, mngt, restore_chunk_l2p_cb, ftl_mngt_get_caller_ctx(mngt));
}

static void
ftl_mngt_recovery_iteration_restore_valid_map(struct spdk_ftl_dev *dev,
		struct ftl_mngt_process *mngt)

@@ -661,6 +717,10 @@ static const struct ftl_mngt_process_desc g_desc_recovery_iteration = {
			.name = "Initialize sequence IDs",
			.action = ftl_mngt_recovery_iteration_init_seq_ids,
		},
		{
			.name = "Restore chunk L2P",
			.action = ftl_mngt_recovery_iteration_restore_chunk_l2p,
		},
		{
			.name = "Restore band L2P",
			.ctx_size = sizeof(struct band_md_ctx),