FTL: Add helper L2P set/get functions for nv_cache

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I61ed4434283c21d7dc62b70898f920e66b595a4f
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13321
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

Author: Kozlowski Mateusz, 2022-06-13 11:07:48 +02:00
Committed by: Tomasz Zawadzki
Parent: 506315a651
Commit: 4a24a7b3e0
4 changed files with 120 additions and 17 deletions

ftl_core.h

@@ -83,9 +83,6 @@ struct spdk_ftl_dev {
 	/* Logical -> physical table */
 	void *l2p;
 
-	/* l2p deferred pins list */
-	TAILQ_HEAD(, ftl_l2p_pin_ctx) l2p_deferred_pins;
-
 	/* Size of the l2p table */
 	uint64_t num_lbas;

ftl_l2p.c

@@ -16,7 +16,6 @@
 int
 ftl_l2p_init(struct spdk_ftl_dev *dev)
 {
-	TAILQ_INIT(&dev->l2p_deferred_pins);
 	return FTL_L2P_OP(init)(dev);
 }
@@ -79,24 +78,12 @@ ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
 void
 ftl_l2p_process(struct spdk_ftl_dev *dev)
 {
-	struct ftl_l2p_pin_ctx *pin_ctx;
-
-	pin_ctx = TAILQ_FIRST(&dev->l2p_deferred_pins);
-	if (pin_ctx) {
-		TAILQ_REMOVE(&dev->l2p_deferred_pins, pin_ctx, link);
-		FTL_L2P_OP(pin)(dev, pin_ctx);
-	}
-
 	FTL_L2P_OP(process)(dev);
 }
 
 bool
 ftl_l2p_is_halted(struct spdk_ftl_dev *dev)
 {
-	if (!TAILQ_EMPTY(&dev->l2p_deferred_pins)) {
-		return false;
-	}
-
 	return FTL_L2P_OP(is_halted)(dev);
 }
@@ -106,6 +93,59 @@ ftl_l2p_halt(struct spdk_ftl_dev *dev)
 	return FTL_L2P_OP(halt)(dev);
 }
 
+void
+ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
+{
+	struct ftl_nv_cache_chunk *current_chunk, *new_chunk;
+	ftl_addr current_addr;
+
+	/* Updating L2P for data in cache device - used by user writes.
+	 * Split off from updating L2P in base due to extra edge cases for handling dirty shutdown in the cache case,
+	 * namely keeping two simultaneous writes to same LBA consistent before/after shutdown - on base device we
+	 * can simply ignore the L2P update, here we need to keep the address with more advanced write pointer
+	 */
+	assert(ftl_check_core_thread(dev));
+	assert(new_addr != FTL_ADDR_INVALID);
+	assert(ftl_addr_in_nvc(dev, new_addr));
+
+	current_addr = ftl_l2p_get(dev, lba);
+
+	if (current_addr != FTL_ADDR_INVALID) {
+		/* Check if write-after-write happened (two simultaneous user writes to the same LBA) */
+		if (spdk_unlikely(current_addr != old_addr
+				  && ftl_addr_in_nvc(dev, current_addr))) {
+			current_chunk = ftl_nv_cache_get_chunk_from_addr(dev, current_addr);
+			new_chunk = ftl_nv_cache_get_chunk_from_addr(dev, new_addr);
+
+			/* To keep data consistency after recovery skip oldest block */
+			/* If both user writes are to the same chunk, the highest address should 'win', to keep data after
+			 * dirty shutdown recovery consistent. If they're on different chunks, then higher seq_id chunk 'wins' */
+			if (current_chunk == new_chunk) {
+				if (new_addr < current_addr) {
+					return;
+				}
+			}
+		}
+
+		/* For recovery from SHM case valid maps need to be set before l2p set and
+		 * invalidated after it */
+		/* DO NOT CHANGE ORDER - START */
+		ftl_nv_cache_set_addr(dev, lba, new_addr);
+		ftl_l2p_set(dev, lba, new_addr);
+		ftl_invalidate_addr(dev, current_addr);
+		/* DO NOT CHANGE ORDER - END */
+
+		return;
+	}
+
+	/* If current address doesn't have any value (i.e. it was never set, or it was trimmed), then we can just set L2P */
+	/* DO NOT CHANGE ORDER - START (need to set P2L maps/valid map first) */
+	ftl_nv_cache_set_addr(dev, lba, new_addr);
+	ftl_l2p_set(dev, lba, new_addr);
+	/* DO NOT CHANGE ORDER - END */
+}
+
 void
 ftl_l2p_update_base(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
 {
@@ -142,7 +182,8 @@ void
 ftl_l2p_pin_complete(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
 {
 	if (spdk_unlikely(status == -EAGAIN)) {
-		TAILQ_INSERT_TAIL(&dev->l2p_deferred_pins, pin_ctx, link);
+		/* Path updated in later patch */
+		assert(false);
 	} else {
 		pin_ctx->cb(dev, status, pin_ctx);
 	}
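
For context, a minimal caller sketch (not part of this patch) showing how the new ftl_l2p_update_cache() helper is meant to be used once a user write to the NV cache completes. The wrapper name and its parameters are hypothetical; it assumes the FTL-internal types used above and that consecutive blocks of one write land at consecutive cache addresses.

/* Hypothetical caller, for illustration only: update the L2P for a run of
 * blocks just written to the cache device. 'first_lba' is the first user LBA,
 * 'first_addr' the cache address of the first written block, and 'old_addrs'
 * the addresses that were pinned for those LBAs before the write was issued. */
static void
example_update_after_user_write(struct spdk_ftl_dev *dev, uint64_t first_lba,
				ftl_addr first_addr, const ftl_addr *old_addrs,
				uint64_t num_blocks)
{
	uint64_t i;

	for (i = 0; i < num_blocks; i++) {
		/* Sets the chunk's P2L map entry first, then the L2P entry, and
		 * invalidates the previous physical address if one was set. */
		ftl_l2p_update_cache(dev, first_lba + i, first_addr + i, old_addrs[i]);
	}
}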

ftl_nv_cache.c

@@ -283,6 +283,62 @@ ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
 	return true;
 }
 
+void
+ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
+		      uint64_t offset, uint64_t lba)
+{
+	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
+	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
+
+	ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
+}
+
+uint64_t
+ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
+{
+	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
+	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
+
+	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
+}
+
+static void
+ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
+{
+	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
+	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
+	uint64_t offset;
+
+	offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
+	ftl_chunk_map_set_lba(chunk, offset, lba);
+}
+
+struct ftl_nv_cache_chunk *
+ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
+{
+	struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
+	uint64_t chunk_idx;
+	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
+
+	assert(chunk != NULL);
+	chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
+	chunk += chunk_idx;
+
+	return chunk;
+}
+
+void
+ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
+{
+	struct ftl_nv_cache_chunk *chunk;
+
+	chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
+
+	assert(lba != FTL_LBA_INVALID);
+
+	ftl_chunk_set_addr(chunk, lba, addr);
+}
+
 static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
 
 void
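
The chunk lookup above relies on chunks occupying contiguous, equally sized block ranges on the cache device, starting at the offset of the first chunk. A small stand-alone sketch of the same arithmetic with made-up geometry (1024 blocks per chunk, first chunk at cache offset 64; these numbers are illustrative only):

#include <assert.h>
#include <stdint.h>

/* Illustration of the address-to-chunk arithmetic used by
 * ftl_nv_cache_get_chunk_from_addr() and ftl_chunk_set_addr(),
 * with made-up geometry. */
int
main(void)
{
	const uint64_t chunk_blocks = 1024;		/* blocks per chunk (illustrative) */
	const uint64_t first_chunk_offset = 64;		/* chunks[0].offset (illustrative) */
	const uint64_t cache_offset = 2113;		/* block offset on the cache device */

	/* Which chunk does this block belong to? */
	uint64_t chunk_idx = (cache_offset - first_chunk_offset) / chunk_blocks;
	/* Each chunk's own offset is first_chunk_offset + chunk_idx * chunk_blocks. */
	uint64_t in_chunk = (cache_offset - (first_chunk_offset + chunk_idx * chunk_blocks)) % chunk_blocks;

	assert(chunk_idx == 2);		/* lands in chunks[2] */
	assert(in_chunk == 1);		/* at index 1 of that chunk's P2L map */
	return 0;
}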

ftl_nv_cache.h

@@ -137,6 +137,12 @@ int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
 bool ftl_nv_cache_full(struct ftl_nv_cache *nv_cache);
 void ftl_nv_cache_process(struct spdk_ftl_dev *dev);
 
+void ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
+			   uint64_t offset, uint64_t lba);
+uint64_t ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset);
+
+void ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);
+
 int ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache);
 void ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache);
@@ -157,4 +163,7 @@ uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);
 typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);
 
+struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
+		ftl_addr addr);
+
 #endif /* FTL_NV_CACHE_H */
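
For illustration, a hypothetical helper (not part of this patch) built on the new getter: it walks a chunk's in-memory P2L map and counts entries holding a valid user LBA, assuming the chunk_map buffer is populated and unused entries are set to FTL_LBA_INVALID.

/* Hypothetical helper, for illustration only: count how many blocks of a
 * chunk have a user LBA recorded in its P2L map. */
static uint64_t
example_count_mapped_blocks(struct ftl_nv_cache_chunk *chunk)
{
	uint64_t i, mapped = 0;

	for (i = 0; i < chunk->nv_cache->chunk_blocks; i++) {
		if (ftl_chunk_map_get_lba(chunk, i) != FTL_LBA_INVALID) {
			mapped++;
		}
	}

	return mapped;
}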