ftl: Unmap functionality

Adds the ability to send trim commands to FTL. Only 4MiB-aligned requests
(both the offset and the length of the request) will be processed. During a
trim operation, an L2P page (containing 1024 4B entries, one per user LBA,
which is where the 4MiB alignment comes from) is marked as unmapped. From
that point on, any L2P access to that page will set the entries themselves
to FTL_ADDR_INVALID. This is done to make the trim as fast as possible,
since for large requests it's probable that most of the affected L2P pages
aren't actually in DRAM.

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I4a04ee9498a2a6939af31b06f2e45d2b7cccbf19
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13378
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Kozlowski Mateusz 2022-05-27 12:31:42 +02:00 committed by Jim Harris
parent 78c3cbf4c9
commit 66fe5f75bb
18 changed files with 398 additions and 2 deletions

View File

@@ -203,6 +203,22 @@ int spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_
uint64_t lba, uint64_t lba_cnt,
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg);
/**
* Submits an unmap to the specified device.
*
* \param dev Device
* \param io Allocated ftl_io
* \param ch I/O channel
* \param lba Starting LBA of the blocks to unmap
* \param lba_cnt Number of blocks to unmap
* \param cb_fn Callback function to invoke when the I/O is completed
* \param cb_arg Argument to pass to the callback function
*
* \return 0 if successfully submitted, negative errno otherwise.
*/
int spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg);
/**
* Returns the size of ftl_io struct that needs to be passed to spdk_ftl_read/write
*
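
For context, a minimal caller-side sketch of driving the new API; this is an
assumption-laden example, not code from this commit: the helper names
(do_unmap, unmap_done) are hypothetical, and it presumes an already started
FTL device and an I/O channel obtained for it.

#include <errno.h>
#include <stdlib.h>
#include "spdk/ftl.h"

/* Hypothetical completion callback; status is 0 on success, negative errno otherwise. */
static void
unmap_done(void *cb_arg, int status)
{
	(void)status;
	free(cb_arg); /* frees the ftl_io allocated below */
}

/* Unmap the first 1024 blocks: exactly one L2P page, i.e. 4MiB at a 4KiB block size. */
static int
do_unmap(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch)
{
	struct ftl_io *io = calloc(1, spdk_ftl_io_size());
	int rc;

	if (!io) {
		return -ENOMEM;
	}
	/* Both the offset (0) and the length (1024) are lbas_in_page aligned. */
	rc = spdk_ftl_unmap(dev, io, ch, 0, 1024, unmap_done, io);
	if (rc) {
		free(io);
	}
	return rc;
}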

View File

@@ -349,6 +349,8 @@ start_io(struct ftl_io *io)
TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
break;
case FTL_IO_UNMAP:
TAILQ_INSERT_TAIL(&dev->unmap_sq, io, queue_entry);
break;
default:
io->status = -EOPNOTSUPP;
ftl_io_complete(io);
@@ -434,6 +436,59 @@ spdk_ftl_readv(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_chann
return queue_io(dev, io);
}
int
ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
{
int rc;
rc = ftl_io_init(ch, io, lba, lba_cnt, NULL, 0, cb_fn, cb_arg, FTL_IO_UNMAP);
if (rc) {
return rc;
}
return queue_io(dev, io);
}
int
spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
{
int rc;
uint64_t alignment = dev->layout.l2p.lbas_in_page;
if (lba_cnt == 0) {
return -EINVAL;
}
/* Check for 64-bit overflow of the requested range */
if (lba + lba_cnt < lba_cnt) {
return -EINVAL;
}
if (lba + lba_cnt > dev->num_lbas) {
return -EINVAL;
}
if (!dev->initialized) {
return -EBUSY;
}
/* Unaligned requests are completed immediately as a no-op */
if (lba % alignment || lba_cnt % alignment) {
rc = ftl_io_init(ch, io, lba, lba_cnt, NULL, 0, cb_fn, cb_arg, FTL_IO_UNMAP);
if (rc) {
return rc;
}
io->status = 0;
ftl_io_complete(io);
return 0;
}
rc = ftl_unmap(dev, io, ch, lba, lba_cnt, cb_fn, cb_arg);
return rc;
}
#define FTL_IO_QUEUE_BATCH 16
int
ftl_io_channel_poll(void *arg)
@@ -472,6 +527,82 @@ ftl_process_io_channel(struct spdk_ftl_dev *dev, struct ftl_io_channel *ioch)
}
}
static void
ftl_process_unmap_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
struct ftl_io *io = md->owner.cb_ctx;
io->dev->unmap_qd--;
if (spdk_unlikely(status)) {
TAILQ_INSERT_HEAD(&io->dev->unmap_sq, io, queue_entry);
return;
}
ftl_io_complete(io);
}
void
ftl_set_unmap_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks, uint64_t seq_id)
{
uint64_t first_page, num_pages;
uint64_t first_md_block, num_md_blocks, num_pages_in_block;
uint32_t lbas_in_page = dev->layout.l2p.lbas_in_page;
struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
uint64_t *page = ftl_md_get_buffer(md);
union ftl_md_vss *page_vss;
size_t i;
first_page = lba / lbas_in_page;
num_pages = num_blocks / lbas_in_page;
for (i = first_page; i < first_page + num_pages; ++i) {
ftl_bitmap_set(dev->unmap_map, i);
page[i] = seq_id;
}
num_pages_in_block = FTL_BLOCK_SIZE / sizeof(*page);
first_md_block = first_page / num_pages_in_block;
num_md_blocks = spdk_divide_round_up(num_pages, num_pages_in_block);
page_vss = ftl_md_get_vss_buffer(md) + first_md_block;
for (i = first_md_block; i < first_md_block + num_md_blocks; ++i, page_vss++) {
page_vss->unmap.start_lba = lba;
page_vss->unmap.num_blocks = num_blocks;
page_vss->unmap.seq_id = seq_id;
}
}
static bool
ftl_process_unmap(struct ftl_io *io)
{
struct spdk_ftl_dev *dev = io->dev;
struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
uint64_t seq_id;
seq_id = ftl_nv_cache_acquire_trim_seq_id(&dev->nv_cache);
if (seq_id == 0) {
return false;
}
dev->unmap_in_progress = true;
dev->unmap_qd++;
dev->sb_shm->trim.start_lba = io->lba;
dev->sb_shm->trim.num_blocks = io->num_blocks;
dev->sb_shm->trim.seq_id = seq_id;
dev->sb_shm->trim.in_progress = true;
ftl_set_unmap_map(dev, io->lba, io->num_blocks, seq_id);
ftl_debug_inject_unmap_error();
dev->sb_shm->trim.in_progress = false;
md->owner.cb_ctx = io;
md->cb = ftl_process_unmap_cb;
ftl_md_persist(md);
return true;
}
static void
ftl_process_io_queue(struct spdk_ftl_dev *dev)
{
@@ -497,6 +628,22 @@ ftl_process_io_queue(struct spdk_ftl_dev *dev)
}
}
if (!TAILQ_EMPTY(&dev->unmap_sq) && dev->unmap_qd == 0) {
io = TAILQ_FIRST(&dev->unmap_sq);
TAILQ_REMOVE(&dev->unmap_sq, io, queue_entry);
assert(io->type == FTL_IO_UNMAP);
/*
* An unmap operation requires generating a sequence id for itself, which it gets based on the open chunk
* in the nv cache. If there are no open chunks (because we're in the middle of a state transition or
* compaction lagged behind), then we need to wait for the nv cache to resolve the situation - it's fine to
* just put back the unmap and try again later.
*/
if (!ftl_process_unmap(io)) {
TAILQ_INSERT_HEAD(&dev->unmap_sq, io, queue_entry);
}
}
TAILQ_FOREACH(ioch, &dev->ioch_queue, entry) {
ftl_process_io_channel(dev, ioch);
}
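
The submission-queue handling above follows a generic requeue-on-transient-failure
pattern; a self-contained sketch of just that pattern (names are hypothetical,
try_start stands in for ftl_process_unmap()):

#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

struct job {
	TAILQ_ENTRY(job) link;
};
TAILQ_HEAD(job_q, job);

/* Stand-in: returns false on a transient shortage (e.g. no open chunk to
 * take a sequence id from), meaning the job should be retried later. */
static bool
try_start(struct job *j)
{
	(void)j;
	return true;
}

/* Pop one job; if it can't start yet, put it back at the head so ordering is
 * preserved and the next poller iteration retries it. */
static void
poll_queue(struct job_q *q)
{
	struct job *j = TAILQ_FIRST(q);

	if (j == NULL) {
		return;
	}
	TAILQ_REMOVE(q, j, link);
	if (!try_start(j)) {
		TAILQ_INSERT_HEAD(q, j, link);
	}
}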

View File

@@ -157,6 +157,15 @@ struct spdk_ftl_dev {
/* Write submission queue */
TAILQ_HEAD(, ftl_io) wr_sq;
/* Trim submission queue */
TAILQ_HEAD(, ftl_io) unmap_sq;
/* Trim valid map */
struct ftl_bitmap *unmap_map;
struct ftl_md *unmap_map_md;
size_t unmap_qd;
bool unmap_in_progress;
/* Writer for user IOs */
struct ftl_writer writer_user;
@@ -191,8 +200,14 @@ bool ftl_needs_reloc(struct spdk_ftl_dev *dev);
struct ftl_band *ftl_band_get_next_free(struct spdk_ftl_dev *dev);
void ftl_set_unmap_map(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t num_blocks,
uint64_t seq_id);
void ftl_recover_max_seq(struct spdk_ftl_dev *dev);
int ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
uint64_t lba, size_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg);
static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{

View File

@@ -13,6 +13,15 @@
#if defined(DEBUG)
void ftl_band_validate_md(struct ftl_band *band, ftl_band_validate_md_cb cb);
void ftl_dev_dump_bands(struct spdk_ftl_dev *dev);
static inline void
ftl_debug_inject_unmap_error(void)
{
static int unmap_no = 0;
if (getenv("FTL_CRASH_ON_UNMAP") && unmap_no++ == 256) {
abort();
}
}
#else
static void
@@ -35,6 +44,10 @@ static inline void
ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
{
}
static inline void
ftl_debug_inject_unmap_error(void)
{
}
#endif
void ftl_dev_dump_stats(const struct spdk_ftl_dev *dev);

View File

@@ -117,6 +117,7 @@ allocate_dev(const struct spdk_ftl_conf *conf, int *error)
TAILQ_INIT(&dev->rd_sq);
TAILQ_INIT(&dev->wr_sq);
TAILQ_INIT(&dev->unmap_sq);
TAILQ_INIT(&dev->ioch_queue);
ftl_writer_init(dev, &dev->writer_user, SPDK_FTL_LIMIT_HIGH, FTL_BAND_TYPE_COMPACTION);

View File

@@ -134,6 +134,10 @@ ftl_io_cb(struct ftl_io *io, void *arg, int status)
ftl_io_clear(io);
TAILQ_INSERT_HEAD(&io->dev->wr_sq, io, queue_entry);
break;
case FTL_IO_UNMAP:
ftl_io_clear(io);
TAILQ_INSERT_HEAD(&io->dev->unmap_sq, io, queue_entry);
break;
default:
/* Unknown IO type, complete to the user */
assert(0);

View File

@@ -123,6 +123,16 @@ ftl_l2p_halt(struct spdk_ftl_dev *dev)
return FTL_L2P_OP(halt)(dev);
}
static uint64_t
get_trim_seq_id(struct spdk_ftl_dev *dev, uint64_t lba)
{
struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
uint64_t *page = ftl_md_get_buffer(md);
uint64_t page_no = lba / dev->layout.l2p.lbas_in_page;
return page[page_no];
}
void
ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr, ftl_addr old_addr)
{
@@ -171,6 +181,14 @@ ftl_l2p_update_cache(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr new_addr,
ftl_invalidate_addr(dev, current_addr);
/* DO NOT CHANGE ORDER - END */
return;
} else {
uint64_t trim_seq_id = get_trim_seq_id(dev, lba);
uint64_t new_seq_id = ftl_nv_cache_get_chunk_from_addr(dev, new_addr)->md->seq_id;
/* Check whether the region was unmapped during the IO */
if (new_seq_id < trim_seq_id) {
return;
}
}
/* If current address doesn't have any value (ie. it was never set, or it was trimmed), then we can just set L2P */

View File

@@ -41,6 +41,7 @@ void ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);
ftl_addr ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba);
void ftl_l2p_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_process(struct spdk_ftl_dev *dev);

View File

@@ -158,6 +158,12 @@ ftl_l2p_cache_get_l1_page_size(void)
return 1UL << 12;
}
static inline uint64_t
ftl_l2p_cache_get_lbas_in_page(struct ftl_l2p_cache *cache)
{
return cache->lbas_in_page;
}
static inline size_t
ftl_l2p_cache_get_page_all_size(void)
{
@@ -295,9 +301,29 @@ ftl_l2p_cache_set_addr(struct spdk_ftl_dev *dev, struct ftl_l2p_cache *cache,
ftl_addr_store(dev, page->page_buffer, lba % cache->lbas_in_page, addr);
}
static void
ftl_l2p_page_set_invalid(struct spdk_ftl_dev *dev, struct ftl_l2p_page *page)
{
ftl_addr addr;
struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;
uint64_t naddr;
page->updates++;
naddr = ftl_l2p_cache_get_lbas_in_page(cache);
for (uint64_t i = 0; i < naddr; i++) {
addr = ftl_addr_load(dev, page->page_buffer, i);
if (addr == FTL_ADDR_INVALID) {
continue;
}
ftl_invalidate_addr(dev, addr);
ftl_l2p_cache_set_addr(dev, cache, page, i, FTL_ADDR_INVALID);
}
}
static inline void
ftl_l2p_cache_page_pin(struct ftl_l2p_cache *cache, struct ftl_l2p_page *page)
{
page->pin_ref_cnt++;
/* Pinned pages can't be evicted (since L2P sets/gets will be executed on it), so remove them from LRU */
@@ -546,6 +572,7 @@ process_persist_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *ar
{
struct ftl_l2p_page *page = arg;
struct ftl_l2p_cache *cache = page->ctx.cache;
struct spdk_ftl_dev *dev = cache->dev;
struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
assert(bdev_io);
@@ -555,6 +582,13 @@ process_persist_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *ar
ctx->status = -EIO;
}
if (ftl_bitmap_get(dev->unmap_map, ctx->idx)) {
/*
* The page had been unmapped and was already invalidated entirely in the persist path
* before the IO, so clear the unmap flag now
*/
ftl_bitmap_clear(dev->unmap_map, page->page_no);
}
ftl_l2p_cache_page_remove(cache, page);
ctx->qd--;
@@ -731,6 +765,7 @@ static void
process_persist(struct ftl_l2p_cache *cache)
{
struct ftl_l2p_cache_process_ctx *ctx = &cache->mctx;
struct spdk_ftl_dev *dev = cache->dev;
while (ctx->idx < cache->num_pages && ctx->qd < 64) {
struct ftl_l2p_page *page = get_l2p_page_by_df_id(cache, ctx->idx);
@@ -740,6 +775,11 @@ process_persist(struct ftl_l2p_cache *cache)
continue;
}
/* Finish the unmap if the page was marked */
if (ftl_bitmap_get(dev->unmap_map, ctx->idx)) {
ftl_l2p_page_set_invalid(dev, page);
}
if (page->on_lru_list) {
ftl_l2p_cache_lru_remove_page(cache, page);
}
@@ -920,6 +960,11 @@ ftl_l2p_cache_get(struct spdk_ftl_dev *dev, uint64_t lba)
assert(ftl_l2p_cache_running(cache));
assert(page->pin_ref_cnt);
if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
ftl_l2p_page_set_invalid(dev, page);
ftl_bitmap_clear(dev->unmap_map, page->page_no);
}
ftl_l2p_cache_lru_promote_page(cache, page);
addr = ftl_l2p_cache_get_addr(dev, cache, page, lba);
@@ -937,6 +982,11 @@ ftl_l2p_cache_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
assert(ftl_l2p_cache_running(cache));
assert(page->pin_ref_cnt);
if (ftl_bitmap_get(dev->unmap_map, page->page_no)) {
ftl_l2p_page_set_invalid(dev, page);
ftl_bitmap_clear(dev->unmap_map, page->page_no);
}
page->updates++;
ftl_l2p_cache_lru_promote_page(cache, page);
ftl_l2p_cache_set_addr(dev, cache, page, lba, addr);

View File

@@ -16,6 +16,7 @@ void ftl_l2p_cache_pin(struct spdk_ftl_dev *dev, struct ftl_l2p_pin_ctx *pin_ctx
void ftl_l2p_cache_unpin(struct spdk_ftl_dev *dev, uint64_t lba, uint64_t count);
ftl_addr ftl_l2p_cache_get(struct spdk_ftl_dev *dev, uint64_t lba);
void ftl_l2p_cache_set(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);
void ftl_l2p_cache_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_cache_clear(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_cache_restore(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);
void ftl_l2p_cache_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx);

View File

@@ -2225,3 +2225,37 @@ ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
ftl_chunk_close(chunk);
}
}
uint64_t
ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
{
struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
uint64_t seq_id, free_space;
if (!chunk) {
chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
return chunk->md->seq_id;
} else {
return 0;
}
}
if (chunk_is_closed(chunk)) {
return 0;
}
seq_id = nv_cache->chunk_current->md->seq_id;
free_space = chunk_get_free_space(nv_cache, chunk);
chunk->md->blocks_skipped = free_space;
chunk->md->blocks_written += free_space;
chunk->md->write_pointer += free_space;
if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
ftl_chunk_close(chunk);
}
nv_cache->chunk_current = NULL;
seq_id++;
return seq_id;
}

View File

@@ -227,4 +227,6 @@ void ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_pro
struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev,
ftl_addr addr);
uint64_t ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache);
#endif /* FTL_NV_CACHE_H */

View File

@@ -176,6 +176,10 @@ ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
void
ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
if (ftl_bitmap_find_first_set(dev->unmap_map, 0, UINT64_MAX) != UINT64_MAX) {
dev->unmap_in_progress = true;
}
dev->initialized = 1;
dev->sb_shm->shm_ready = true;
@@ -246,3 +250,68 @@ ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
ftl_mngt_next_step(mngt);
}
void
ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
uint64_t num_l2p_pages = spdk_divide_round_up(dev->num_lbas, dev->layout.l2p.lbas_in_page);
uint64_t map_blocks = ftl_bitmap_bits_to_blocks(num_l2p_pages);
dev->unmap_map_md = ftl_md_create(dev,
map_blocks,
0,
"trim_bitmap",
ftl_md_create_shm_flags(dev), NULL);
if (!dev->unmap_map_md) {
FTL_ERRLOG(dev, "Failed to create trim bitmap md\n");
ftl_mngt_fail_step(mngt);
return;
}
dev->unmap_map = ftl_bitmap_create(ftl_md_get_buffer(dev->unmap_map_md),
ftl_md_get_buffer_size(dev->unmap_map_md));
if (!dev->unmap_map) {
FTL_ERRLOG(dev, "Failed to create unmap map\n");
ftl_mngt_fail_step(mngt);
return;
}
ftl_mngt_next_step(mngt);
}
static void
unmap_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
struct ftl_mngt_process *mngt = md->owner.cb_ctx;
if (status) {
FTL_ERRLOG(dev, "ERROR of clearing trim unmap\n");
ftl_mngt_fail_step(mngt);
} else {
ftl_mngt_next_step(mngt);
}
}
void
ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
md->cb = unmap_clear_cb;
md->owner.cb_ctx = mngt;
ftl_md_clear(md, 0, NULL);
}
void
ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
ftl_bitmap_destroy(dev->unmap_map);
dev->unmap_map = NULL;
ftl_md_destroy(dev->unmap_map_md, ftl_md_destroy_shm_flags(dev));
dev->unmap_map_md = NULL;
ftl_mngt_next_step(mngt);
}

View File

@@ -110,6 +110,11 @@ static const struct ftl_mngt_process_desc desc_startup = {
.action = ftl_mngt_init_vld_map,
.cleanup = ftl_mngt_deinit_vld_map
},
{
.name = "Initialize trim map",
.action = ftl_mngt_init_unmap_map,
.cleanup = ftl_mngt_deinit_unmap_map
},
{
.name = "Initialize bands metadata",
.action = ftl_mngt_init_bands_md,
@@ -171,6 +176,10 @@ static const struct ftl_mngt_process_desc desc_first_start = {
.name = "Wipe P2L region",
.action = ftl_mngt_p2l_wipe,
},
{
.name = "Clear trim map",
.action = ftl_mngt_unmap_clear,
},
{
.name = "Free P2L region bufs",
.action = ftl_mngt_p2l_free_bufs,

View File

@@ -64,6 +64,8 @@ void ftl_mngt_deinit_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
void ftl_mngt_clear_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_unmap_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
@@ -112,6 +114,12 @@ void ftl_mngt_init_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mn
void ftl_mngt_deinit_vld_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_init_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_deinit_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_unmap_clear(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_init_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
void ftl_mngt_p2l_deinit_ckpt(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

View File

@@ -15,6 +15,7 @@
spdk_ftl_io_size;
spdk_ftl_readv;
spdk_ftl_writev;
spdk_ftl_unmap;
spdk_ftl_dev_set_fast_shutdown;
local: *;

View File

@@ -148,6 +148,9 @@ _bdev_ftl_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
bdev_io->u.bdev.iovcnt, bdev_ftl_cb, bdev_io);
case SPDK_BDEV_IO_TYPE_UNMAP:
return spdk_ftl_unmap(ftl_bdev->dev, (struct ftl_io *)bdev_io->driver_ctx,
ch, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, bdev_ftl_cb, bdev_io);
case SPDK_BDEV_IO_TYPE_FLUSH:
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;

View File

@@ -129,6 +129,10 @@ DEFINE_STUB(ftl_mempool_claim_df, void *, (struct ftl_mempool *mpool, ftl_df_obj
DEFINE_STUB(ftl_bitmap_count_set, uint64_t, (struct ftl_bitmap *bitmap), 0);
DEFINE_STUB(ftl_p2l_ckpt_region_type, enum ftl_layout_region_type,
(const struct ftl_p2l_ckpt *ckpt), 0);
DEFINE_STUB(ftl_md_get_buffer, void *, (struct ftl_md *md), NULL);
DEFINE_STUB(ftl_md_get_vss_buffer, union ftl_md_vss *, (struct ftl_md *md), NULL);
DEFINE_STUB(ftl_nv_cache_acquire_trim_seq_id, uint64_t, (struct ftl_nv_cache *nv_cache), 0);
DEFINE_STUB_V(ftl_md_persist, (struct ftl_md *md));
static void
adjust_bitmap(struct ftl_bitmap **bitmap, uint64_t *bit)