FTL: Add L2P cache eviction logic

Adds eviction of least recently used pages from the L2P cache; dirty pages
are persisted before being removed from the cache.

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: Ic646f7e9da777d077b5cb9b409c3f03ef05b1273
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13357
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

@@ -140,6 +140,7 @@ typedef void (*ftl_l2p_cache_sync_cb)(struct spdk_ftl_dev *dev, int status, void
static bool page_set_is_done(struct ftl_l2p_page_set *page_set);
static void page_set_end(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
                         struct ftl_l2p_page_set *page_set);
static void page_out_io_retry(void *arg);
static void page_in_io_retry(void *arg);

static inline void
@@ -216,6 +217,18 @@ ftl_l2p_cache_page_remove(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page
        ftl_mempool_put(cache->l2_ctx_pool, page);
}
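
/*
 * LRU helpers used by eviction: the coldest (least recently used) page sits at
 * the tail of the LRU list, and TAILQ_PREV() walks towards hotter pages.
 */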
static inline struct ftl_l2p_page *
ftl_l2p_cache_get_coldest_page(struct ftl_l2p_cache *cache)
{
        return TAILQ_LAST(&cache->lru_list, l2p_lru_list);
}

static inline struct ftl_l2p_page *
ftl_l2p_cache_get_hotter_page(struct ftl_l2p_page *page)
{
        return TAILQ_PREV(page, l2p_lru_list, list_entry);
}

static inline uint64_t
ftl_l2p_cache_page_get_bdev_offset(struct ftl_l2p_cache *cache,
                                   struct ftl_l2p_page *page)
@@ -258,6 +271,14 @@ ftl_l2p_cache_page_alloc(struct ftl_l2p_cache *cache, size_t page_no)
        return page;
}
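
/*
 * A page may be dropped from the cache only when it has no unpersisted updates,
 * is past the INIT state and is not pinned.
 */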
static inline bool
ftl_l2p_cache_page_can_remove(struct ftl_l2p_page *page)
{
        return (!page->updates &&
                page->state != L2P_CACHE_PAGE_INIT &&
                !page->pin_ref_cnt);
}

static inline ftl_addr
ftl_l2p_cache_get_addr(struct spdk_ftl_dev *dev,
                       struct ftl_l2p_cache *cache, struct ftl_l2p_page *page, uint64_t lba)
@@ -302,6 +323,21 @@ ftl_l2p_cache_page_unpin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
        }
}
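
/*
 * A page is eligible for eviction only when it is READY (not being flushed,
 * persisted or initialized) and not pinned.
 */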
static inline bool
ftl_l2p_cache_page_can_evict(struct ftl_l2p_page *page)
{
        return (page->state == L2P_CACHE_PAGE_FLUSHING ||
                page->state == L2P_CACHE_PAGE_PERSISTING ||
                page->state == L2P_CACHE_PAGE_INIT ||
                page->pin_ref_cnt) ? false : true;
}
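
/*
 * Keep evicting until the number of free pages plus pages currently being
 * evicted reaches the evict_keep watermark.
 */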
static bool
ftl_l2p_cache_evict_continue(struct ftl_l2p_cache *cache)
{
        return cache->l2_pgs_avail + cache->l2_pgs_evicting < cache->evict_keep;
}

static void *
_ftl_l2p_cache_init(struct spdk_ftl_dev *dev, size_t addr_size, uint64_t l2p_size)
{
@@ -900,6 +936,143 @@ ftl_l2p_cache_process_page_sets(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *
        return 0;
}
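
/*
 * Walk the LRU list starting from the coldest page and return the first page
 * that can be evicted, removing it from the list.
 */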
static struct ftl_l2p_page *
eviction_get_page(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
        uint64_t i = 0;
        struct ftl_l2p_page *page = ftl_l2p_cache_get_coldest_page(cache);

        while (page) {
                ftl_bug(L2P_CACHE_PAGE_READY != page->state);
                ftl_bug(page->pin_ref_cnt);

                if (ftl_l2p_cache_page_can_evict(page)) {
                        ftl_l2p_cache_lru_remove_page(cache, page);
                        return page;
                }

                /*
                 * Practically only one iteration is needed to find a page, since the
                 * LRU list contains only ready and unpinned pages.
                 */
                ftl_bug(++i > 1024);

                page = ftl_l2p_cache_get_hotter_page(page);
        }

        return NULL;
}
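
/*
 * Write-out completion: subtract the updates that were just persisted; if the
 * page is now clean and unpinned it is removed from the cache, otherwise it
 * returns to the READY state (and back onto the LRU list when not pinned).
 */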
static void
page_out_io_complete(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
                     struct ftl_l2p_page *page, bool success)
{
        cache->l2_pgs_evicting--;

        ftl_bug(page->ctx.updates > page->updates);
        ftl_bug(!TAILQ_EMPTY(&page->ppe_list));
        ftl_bug(page->on_lru_list);

        if (spdk_likely(success)) {
                page->updates -= page->ctx.updates;
        }

        if (success && ftl_l2p_cache_page_can_remove(page)) {
                ftl_l2p_cache_page_remove(cache, page);
        } else {
                if (!page->pin_ref_cnt) {
                        ftl_l2p_cache_lru_add_page(cache, page);
                }
                page->state = L2P_CACHE_PAGE_READY;
        }
}
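
/* bdev completion callback for a page write-out: free the bdev_io and finish the eviction. */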
static void
page_out_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
        struct ftl_l2p_page *page = cb_arg;
        struct ftl_l2p_cache *cache = page->ctx.cache;
        struct spdk_ftl_dev *dev = cache->dev;

        spdk_bdev_free_io(bdev_io);
        page_out_io_complete(dev, cache, page, success);
}
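
/*
 * Submit a one-block write of the page buffer to the backing bdev. On -ENOMEM the
 * request is queued with spdk_bdev_queue_io_wait() and retried from
 * page_out_io_retry(); any other submission error is fatal.
 */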
static void
page_out_io(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
            struct ftl_l2p_page *page)
{
        struct spdk_io_channel *ioch;
        struct spdk_bdev *bdev;
        struct spdk_bdev_io_wait_entry *bdev_io_wait;
        int rc;

        page->ctx.cache = cache;

        rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, ftl_l2p_cache_get_bdev_desc(cache),
                        ftl_l2p_cache_get_bdev_iochannel(cache),
                        page->page_buffer, NULL, ftl_l2p_cache_page_get_bdev_offset(cache, page),
                        1, page_out_io_cb, page);

        cache->l2_pgs_evicting++;

        if (spdk_likely(0 == rc)) {
                return;
        }

        if (rc == -ENOMEM) {
                ioch = ftl_l2p_cache_get_bdev_iochannel(cache);
                bdev = spdk_bdev_desc_get_bdev(ftl_l2p_cache_get_bdev_desc(cache));
                bdev_io_wait = &page->ctx.bdev_io_wait;
                bdev_io_wait->bdev = bdev;
                bdev_io_wait->cb_fn = page_out_io_retry;
                bdev_io_wait->cb_arg = page;

                rc = spdk_bdev_queue_io_wait(bdev, ioch, bdev_io_wait);
                ftl_bug(rc);
        } else {
                ftl_abort();
        }
}
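
/*
 * Retry callback for a write-out that previously failed with -ENOMEM; the evicting
 * counter is decremented here because page_out_io() increments it again.
 */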
static void
page_out_io_retry(void *arg)
{
        struct ftl_l2p_page *page = arg;
        struct ftl_l2p_cache *cache = page->ctx.cache;
        struct spdk_ftl_dev *dev = cache->dev;

        cache->l2_pgs_evicting--;
        page_out_io(dev, cache, page);
}
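
/*
 * Evict at most one page per call: stop once enough pages are free or in flight,
 * cap the number of concurrent evictions, flush dirty pages and drop clean ones.
 */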
static void
ftl_l2p_cache_process_eviction(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache)
{
        struct ftl_l2p_page *page;

        if (!ftl_l2p_cache_evict_continue(cache)) {
                return;
        }

        if (cache->l2_pgs_evicting > 512) {
                return;
        }

        page = eviction_get_page(dev, cache);
        if (spdk_unlikely(!page)) {
                return;
        }

        if (page->updates) {
                page->state = L2P_CACHE_PAGE_FLUSHING;
                page->ctx.updates = page->updates;
                page_out_io(dev, cache, page);
        } else {
                /* Page is clean and can be removed right away */
                ftl_l2p_cache_page_remove(cache, page);
        }
}
void
ftl_l2p_cache_process(struct spdk_ftl_dev *dev)
{
@@ -915,4 +1088,6 @@ ftl_l2p_cache_process(struct spdk_ftl_dev *dev)
                        break;
                }
        }

        ftl_l2p_cache_process_eviction(dev, cache);
}