lib/ftl: keep reloc traffic out of non-volatile cache
Data being moved from one band to another doesn't need to be stored on the non-volatile cache. Not only does it add unnecessary traffic to the cache (wearing it out and reducing its throughput), but it requires us to synchronize it with user writes to the same LBAs. To avoid all that, this patch adds the FTL_IO_BYPASS_CACHE flag to all writes coming from the reloc module. However, to be sure that the moved data is stored on disk and can be restored in case of power loss, we need to make sure that each free band has all of its data moved to a closed band before it can be erased. It's done by keeping track of the number of outstanding IOs moving data from a particular band (num_reloc_blocks), as well as the number of open bands that contain data from this band (num_reloc_bands). Only when both of these are at zero and the band has zero valid blocks can it be erased. Change-Id: I7c106011ffc9685eb8e5ff497919237a305e4478 Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/458101 Reviewed-by: Ben Walker <benjamin.walker@intel.com> Reviewed-by: Mateusz Kozlowski <mateusz.kozlowski@intel.com> Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com> Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
4d113ee5d3
commit
0f0af48009
@ -188,6 +188,13 @@ struct ftl_band {
|
||||
/* End metadata start ppa */
|
||||
struct ftl_ppa tail_md_ppa;
|
||||
|
||||
/* Bitmap of all bands that have their data moved onto this band */
|
||||
struct spdk_bit_array *reloc_bitmap;
|
||||
/* Number of open bands containing data moved from this band */
|
||||
size_t num_reloc_bands;
|
||||
/* Number of blocks currently being moved from this band */
|
||||
size_t num_reloc_blocks;
|
||||
|
||||
/* Free/shut bands' lists */
|
||||
LIST_ENTRY(ftl_band) list_entry;
|
||||
|
||||
|
@ -203,20 +203,22 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
struct ftl_nv_cache *nv_cache = &dev->nv_cache;
|
||||
struct ftl_band *band = io->band;
|
||||
struct ftl_wptr *wptr;
|
||||
size_t id;
|
||||
|
||||
wptr = ftl_wptr_from_band(io->band);
|
||||
wptr = ftl_wptr_from_band(band);
|
||||
|
||||
if (status) {
|
||||
ftl_md_write_fail(io, status);
|
||||
return;
|
||||
}
|
||||
|
||||
ftl_band_set_next_state(io->band);
|
||||
if (io->band->state == FTL_BAND_STATE_CLOSED) {
|
||||
ftl_band_set_next_state(band);
|
||||
if (band->state == FTL_BAND_STATE_CLOSED) {
|
||||
if (nv_cache->bdev_desc) {
|
||||
pthread_spin_lock(&nv_cache->lock);
|
||||
nv_cache->num_available += ftl_band_user_lbks(io->band);
|
||||
nv_cache->num_available += ftl_band_user_lbks(band);
|
||||
|
||||
if (spdk_unlikely(nv_cache->num_available > nv_cache->num_data_blocks)) {
|
||||
nv_cache->num_available = nv_cache->num_data_blocks;
|
||||
@ -224,6 +226,20 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
pthread_spin_unlock(&nv_cache->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Go through the reloc_bitmap, checking for all the bands that had their data moved
|
||||
* onto the current band and update their counters to allow them to be used for writing
|
||||
* (once they're closed and empty).
|
||||
*/
|
||||
for (id = 0; id < ftl_dev_num_bands(dev); ++id) {
|
||||
if (spdk_bit_array_get(band->reloc_bitmap, id)) {
|
||||
assert(dev->bands[id].num_reloc_bands > 0);
|
||||
dev->bands[id].num_reloc_bands--;
|
||||
|
||||
spdk_bit_array_clear(band->reloc_bitmap, id);
|
||||
}
|
||||
}
|
||||
|
||||
ftl_remove_wptr(wptr);
|
||||
}
|
||||
}
|
||||
@ -362,11 +378,17 @@ ftl_next_write_band(struct spdk_ftl_dev *dev)
|
||||
{
|
||||
struct ftl_band *band;
|
||||
|
||||
band = LIST_FIRST(&dev->free_bands);
|
||||
if (!band) {
|
||||
/* Find a free band that has all of its data moved onto other closed bands */
|
||||
LIST_FOREACH(band, &dev->free_bands, list_entry) {
|
||||
assert(band->state == FTL_BAND_STATE_FREE);
|
||||
if (band->num_reloc_bands == 0 && band->num_reloc_blocks == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (spdk_unlikely(!band)) {
|
||||
return NULL;
|
||||
}
|
||||
assert(band->state == FTL_BAND_STATE_FREE);
|
||||
|
||||
if (ftl_band_erase(band)) {
|
||||
/* TODO: handle erase failure */
|
||||
@ -1205,6 +1227,7 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
struct ftl_rwb_batch *batch = io->rwb_batch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_band *band;
|
||||
|
||||
if (status) {
|
||||
ftl_write_fail(io, status);
|
||||
@ -1213,11 +1236,17 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
|
||||
assert(io->lbk_cnt == dev->xfer_size);
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
band = entry->band;
|
||||
if (!(io->flags & FTL_IO_MD) && !(entry->flags & FTL_IO_PAD)) {
|
||||
/* Verify that the LBA is set for user lbks */
|
||||
assert(entry->lba != FTL_LBA_INVALID);
|
||||
}
|
||||
|
||||
if (band != NULL) {
|
||||
assert(band->num_reloc_blocks > 0);
|
||||
band->num_reloc_blocks--;
|
||||
}
|
||||
|
||||
SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lu, lba:%lu\n",
|
||||
entry->ppa.ppa, entry->lba);
|
||||
}
|
||||
@ -1488,8 +1517,15 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
|
||||
|
||||
ppa = wptr->ppa;
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
entry->ppa = ppa;
|
||||
/* Update band's relocation stats if the IO comes from reloc */
|
||||
if (entry->flags & FTL_IO_WEAK) {
|
||||
if (!spdk_bit_array_get(wptr->band->reloc_bitmap, entry->band->id)) {
|
||||
spdk_bit_array_set(wptr->band->reloc_bitmap, entry->band->id);
|
||||
entry->band->num_reloc_bands++;
|
||||
}
|
||||
}
|
||||
|
||||
entry->ppa = ppa;
|
||||
if (entry->lba != FTL_LBA_INVALID) {
|
||||
pthread_spin_lock(&entry->lock);
|
||||
prev_ppa = ftl_l2p_get(dev, entry->lba);
|
||||
@ -1556,13 +1592,12 @@ ftl_process_writes(struct spdk_ftl_dev *dev)
|
||||
static void
|
||||
ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
|
||||
{
|
||||
struct ftl_band *band;
|
||||
|
||||
memcpy(entry->data, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);
|
||||
|
||||
if (ftl_rwb_entry_weak(entry)) {
|
||||
band = ftl_band_from_ppa(io->dev, io->ppa);
|
||||
entry->ppa = ftl_band_next_ppa(band, io->ppa, io->pos);
|
||||
entry->band = ftl_band_from_ppa(io->dev, io->ppa);
|
||||
entry->ppa = ftl_band_next_ppa(entry->band, io->ppa, io->pos);
|
||||
entry->band->num_reloc_blocks++;
|
||||
}
|
||||
|
||||
entry->trace = io->trace;
|
||||
|
@ -352,6 +352,12 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
|
||||
SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
|
||||
goto out;
|
||||
}
|
||||
|
||||
band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
|
||||
if (!band->reloc_bitmap) {
|
||||
SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
|
||||
@ -1242,6 +1248,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
|
||||
for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
|
||||
free(dev->bands[i].chunk_buf);
|
||||
spdk_bit_array_free(&dev->bands[i].lba_map.vld);
|
||||
spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -448,9 +448,11 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
|
||||
static int
|
||||
ftl_reloc_write(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
|
||||
{
|
||||
int io_flags = FTL_IO_WEAK | FTL_IO_VECTOR_LBA | FTL_IO_BYPASS_CACHE;
|
||||
|
||||
if (spdk_likely(!move->io)) {
|
||||
move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_write_cb,
|
||||
FTL_IO_WRITE, FTL_IO_WEAK | FTL_IO_VECTOR_LBA);
|
||||
FTL_IO_WRITE, io_flags);
|
||||
if (!move->io) {
|
||||
ftl_reloc_free_move(breloc, move);
|
||||
return -ENOMEM;
|
||||
|
@ -302,6 +302,7 @@ ftl_rwb_batch_release(struct ftl_rwb_batch *batch)
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
num_acquired = __atomic_fetch_sub(&rwb->num_acquired[ftl_rwb_entry_type(entry)], 1,
|
||||
__ATOMIC_SEQ_CST);
|
||||
entry->band = NULL;
|
||||
assert(num_acquired > 0);
|
||||
}
|
||||
|
||||
|
@ -42,8 +42,9 @@
|
||||
#include "ftl_trace.h"
|
||||
|
||||
struct ftl_rwb;
|
||||
struct spdk_ftl_conf;
|
||||
struct ftl_rwb_batch;
|
||||
struct ftl_band;
|
||||
struct spdk_ftl_conf;
|
||||
|
||||
enum ftl_rwb_entry_type {
|
||||
FTL_RWB_TYPE_INTERNAL,
|
||||
@ -65,6 +66,9 @@ struct ftl_rwb_entry {
|
||||
/* Physical address */
|
||||
struct ftl_ppa ppa;
|
||||
|
||||
/* Band the data is moved from (only valid when relocating data) */
|
||||
struct ftl_band *band;
|
||||
|
||||
/* Position within the rwb's buffer */
|
||||
unsigned int pos;
|
||||
|
||||
|
@ -101,6 +101,9 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
|
||||
band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
|
||||
SPDK_CU_ASSERT_FATAL(band->chunk_buf != NULL);
|
||||
|
||||
band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
|
||||
SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);
|
||||
|
||||
for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
|
||||
chunk = &band->chunk_buf[i];
|
||||
chunk->pos = i;
|
||||
@ -133,6 +136,7 @@ test_free_ftl_band(struct ftl_band *band)
|
||||
{
|
||||
SPDK_CU_ASSERT_FATAL(band != NULL);
|
||||
spdk_bit_array_free(&band->lba_map.vld);
|
||||
spdk_bit_array_free(&band->reloc_bitmap);
|
||||
free(band->chunk_buf);
|
||||
free(band->lba_map.map);
|
||||
spdk_dma_free(band->lba_map.dma_buf);
|
||||
|
Loading…
Reference in New Issue
Block a user