ftl: nv cache write throttling

Adds user write throttling. Since writing to the cache must be balanced
against the ability to compact the data to the base device, this
throttling mechanism allows for smoother, more stable performance,
tying the user write speed to the compaction drain speed.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Change-Id: Ia85efeb387f17c6c080b23ae4e658a6d7e47a2fb
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13392
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Artur Paszkiewicz 2022-05-27 13:57:30 +02:00 committed by Jim Harris
parent 8a76d5500d
commit 63b2fecb3f
6 changed files with 160 additions and 3 deletions
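In short, the patch gives the NV cache a per-interval write budget: compaction bandwidth is tracked as a simple moving average, and at a fixed interval the budget is recomputed as that average scaled by a proportional modifier driven by the free-chunk level. The sketch below only illustrates that control loop; the names are hypothetical and a 4096-byte block size is assumed. The real implementation is in the ftl_nv_cache.c hunks further down.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative throttle state, loosely mirroring the throttle fields added to struct ftl_nv_cache */
struct throttle_sketch {
	double   compaction_sma;	/* measured compaction speed, bytes per TSC tick */
	uint64_t interval_tsc;		/* throttle window length in ticks (20 ms worth) */
	uint64_t window_start_tsc;
	uint64_t blocks_submitted;	/* user blocks accepted in the current window */
	uint64_t blocks_limit;		/* budget for the current window */
};

/* Recompute the budget from the compaction speed and the free-chunk error
 * (err = (free_chunks - free_target) / total_chunks, negative when below target). */
static void
sketch_update_limit(struct throttle_sketch *t, double err)
{
	double modifier = 20.0 * err;		/* proportional gain, Kp = 20 */

	if (modifier < -0.8) {
		modifier = -0.8;
	} else if (modifier > 0.5) {
		modifier = 0.5;
	}

	t->blocks_limit = t->compaction_sma * t->interval_tsc / 4096 * (1.0 + modifier);
}

/* I/O path check: accept the write now or leave it on the submission queue. */
static bool
sketch_may_submit(struct throttle_sketch *t, uint64_t now_tsc, uint64_t num_blocks, double err)
{
	if (now_tsc - t->window_start_tsc >= t->interval_tsc) {
		sketch_update_limit(t, err);	/* new window: refresh the budget */
		t->window_start_tsc = now_tsc;
		t->blocks_submitted = 0;
	}

	if (t->blocks_submitted >= t->blocks_limit) {
		return false;			/* throttled */
	}

	t->blocks_submitted += num_blocks;
	return true;
}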

View File

@ -101,6 +101,9 @@ struct spdk_ftl_conf {
	struct {
		/* Start compaction when full chunks exceed given % of entire chunks */
		uint32_t chunk_compaction_threshold;

		/* Percentage of chunks to maintain free */
		uint32_t chunk_free_target;
	} nv_cache;

	/* Name of base block device (zoned or non-zoned) */

View File

@ -640,12 +640,13 @@ ftl_process_io_queue(struct spdk_ftl_dev *dev)
		ftl_io_pin(io);
	}

	if (!ftl_nv_cache_full(&dev->nv_cache) && !TAILQ_EMPTY(&dev->wr_sq)) {
	while (!TAILQ_EMPTY(&dev->wr_sq) && !ftl_nv_cache_throttle(dev)) {
		io = TAILQ_FIRST(&dev->wr_sq);
		TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
		assert(io->type == FTL_IO_WRITE);
		if (!ftl_nv_cache_write(io)) {
			TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
			break;
		}
	}

View File

@ -197,6 +197,11 @@ ftl_nv_cache_init(struct spdk_ftl_dev *dev)
		return -ENOMEM;
	}

	nv_cache->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
		(spdk_get_ticks_hz() / 1000);

	nv_cache->chunk_free_target = spdk_divide_round_up(nv_cache->chunk_count *
			dev->conf.nv_cache.chunk_free_target,
			100);

	return 0;
}
@ -471,10 +476,44 @@ ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
	}
}

static void
compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
	double *ptr;

	if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
		return;
	}

	if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
		ptr = compaction_bw->buf + compaction_bw->first;
		compaction_bw->first++;
		if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
			compaction_bw->first = 0;
		}
		compaction_bw->sum -= *ptr;
	} else {
		ptr = compaction_bw->buf + compaction_bw->count;
		compaction_bw->count++;
	}

	*ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
	chunk->compaction_length_tsc = 0;
	compaction_bw->sum += *ptr;
	nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
}

static void
chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());

	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
	chunk->compaction_start_tsc = tsc;

	chunk->md->blocks_compacted += num_blocks;
	if (!is_chunk_compacted(chunk)) {
@ -485,6 +524,8 @@ chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
	TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
	nv_cache->chunk_comp_count--;

	compaction_stats_update(chunk);

	ftl_chunk_free(chunk);
}
@ -743,6 +784,8 @@ compaction_process(struct ftl_nv_cache_compactor *compactor)
		return;
	}

	chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());

	/*
	 * Get range of blocks to read
	 */
@ -858,6 +901,10 @@ compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
	union ftl_md_vss *md;
	ftl_addr current_addr;
	const uint64_t num_entries = wr->num_blocks;
	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());

	chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
	chunk->compaction_start_tsc = tsc;

	dev = SPDK_CONTAINEROF(compactor->nv_cache,
			       struct spdk_ftl_dev, nv_cache);
@ -1092,6 +1139,8 @@ ftl_nv_cache_write(struct ftl_io *io)
		      ftl_nv_cache_pin_cb, io,
		      &io->l2p_pin_ctx);

	dev->nv_cache.throttle.blocks_submitted += io->num_blocks;

	return true;
}
@ -1217,6 +1266,44 @@ ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
	ftl_bitmap_set(dev->valid_map, addr);
}

static void
ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
{
	double err;
	double modifier;

	err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
	modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;

	if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
	} else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
		modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
	}

	if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
		nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
	} else {
		double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
					     FTL_BLOCK_SIZE;
		nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
	}
}

static void
ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
{
	uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());

	if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
		nv_cache->throttle.start_tsc = tsc;
	} else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
		ftl_nv_cache_throttle_update(nv_cache);
		nv_cache->throttle.start_tsc = tsc;
		nv_cache->throttle.blocks_submitted = 0;
	}
}

static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);

void
@ -1254,9 +1341,11 @@ ftl_nv_cache_process(struct spdk_ftl_dev *dev)
			ftl_nv_cache_compaction_reset(compactor);
		}
	}

	ftl_nv_cache_process_throttle(nv_cache);
}

bool
static bool
ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
{
	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
@ -1266,6 +1355,19 @@ ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
	}
}

bool
ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
{
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
	    ftl_nv_cache_full(nv_cache)) {
		return true;
	}

	return false;
}

static void
chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
{

View File

@ -28,6 +28,24 @@
#define FTL_NV_CACHE_NUM_COMPACTORS 8

/*
 * Parameters controlling nv cache write throttling.
 *
 * The write throttle limit value is calculated as follows:
 * limit = compaction_average_bw * (1.0 + modifier)
 *
 * The modifier depends on the number of free chunks vs the configured threshold. Its value is
 * zero if the number of free chunks is at the threshold, negative if below and positive if above.
 */

/* Interval in milliseconds between write throttle updates. */
#define FTL_NV_CACHE_THROTTLE_INTERVAL_MS 20

/* Throttle modifier proportional gain */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_KP 20

/* Min and max modifier values */
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MIN -0.8
#define FTL_NV_CACHE_THROTTLE_MODIFIER_MAX 0.5
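To put numbers on the formula above (all values here are illustrative, not defaults taken from the patch): compaction_sma is stored in bytes per TSC tick, so on a 3 GHz TSC a compaction drain rate of about 150 MB/s corresponds to an SMA of 0.05 bytes/tick, and the 20 ms interval is 60,000,000 ticks. Assuming the 4 KiB FTL block size, that yields blocks_per_interval = 0.05 * 60,000,000 / 4096, roughly 732 blocks. If the free-chunk count sits 2% of all chunks below the target, err = -0.02 and modifier = 20 * -0.02 = -0.4, so the per-interval budget shrinks to about 732 * 0.6, roughly 439 blocks; once free chunks climb back above the target, the modifier turns positive (capped at +0.5), letting user writes briefly outpace compaction.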
struct ftl_nvcache_restore;
typedef void (*ftl_nv_cache_restore_fn)(struct ftl_nvcache_restore *, int, void *cb_arg);
@ -90,6 +108,12 @@ struct ftl_nv_cache_chunk {
	/* This flag is used to indicate chunk is used in recovery */
	bool recovery;

	/* Compaction start time */
	uint64_t compaction_start_tsc;

	/* Compaction duration */
	uint64_t compaction_length_tsc;

	/* For writing metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;
};
@ -169,6 +193,27 @@ struct ftl_nv_cache {
	struct ftl_nv_cache_chunk *chunks;

	uint64_t last_seq_id;

	uint64_t chunk_free_target;

	/* Simple moving average of recent compaction velocity values */
	double compaction_sma;

#define FTL_NV_CACHE_COMPACTION_SMA_N (FTL_NV_CACHE_NUM_COMPACTORS * 2)
	/* Circular buffer holding values for calculating compaction SMA */
	struct compaction_bw_stats {
		double buf[FTL_NV_CACHE_COMPACTION_SMA_N];
		ptrdiff_t first;
		size_t count;
		double sum;
	} compaction_recent_bw;

	struct {
		/* Length of the throttle interval in TSC ticks */
		uint64_t interval_tsc;
		/* Start of the current throttle interval */
		uint64_t start_tsc;
		/* User blocks written to the cache in the current interval */
		uint64_t blocks_submitted;
		/* Upper limit on blocks_submitted within one interval */
		uint64_t blocks_submitted_limit;
	} throttle;
};

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
@ -177,7 +222,7 @@ bool ftl_nv_cache_write(struct ftl_io *io);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
		      spdk_bdev_io_completion_cb cb, void *cb_arg);
bool ftl_nv_cache_full(struct ftl_nv_cache *nv_cache);
bool ftl_nv_cache_throttle(struct spdk_ftl_dev *dev);
void ftl_nv_cache_process(struct spdk_ftl_dev *dev);
void ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,

View File

@ -24,6 +24,7 @@ static const struct spdk_ftl_conf g_default_conf = {
	.user_io_pool_size = 2048,
	.nv_cache = {
		.chunk_compaction_threshold = 80,
		.chunk_free_target = 5,
	},
	.fast_shutdown = true,
};
@ -138,5 +139,9 @@ ftl_conf_is_valid(const struct spdk_ftl_conf *conf)
		return false;
	}

	if (conf->nv_cache.chunk_free_target == 0 || conf->nv_cache.chunk_free_target > 100) {
		return false;
	}

	return true;
}
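As a worked example (the chunk count is made up for illustration): with the default chunk_free_target of 5 and an NV cache of 640 chunks, ftl_nv_cache_init above computes a free target of spdk_divide_round_up(640 * 5, 100) = 32 chunks, so the throttle modifier goes negative as soon as fewer than 32 chunks are free. A chunk_free_target of 0 or anything above 100 is rejected by ftl_conf_is_valid.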

View File

@ -144,6 +144,7 @@ DEFINE_STUB(ftl_nv_cache_acquire_trim_seq_id, uint64_t, (struct ftl_nv_cache *nv
DEFINE_STUB_V(ftl_md_persist, (struct ftl_md *md));
DEFINE_STUB_V(spdk_bdev_io_get_nvme_status, (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
int *sct, int *sc));
DEFINE_STUB(ftl_nv_cache_throttle, bool, (struct spdk_ftl_dev *dev), true);
static void
adjust_bitmap(struct ftl_bitmap **bitmap, uint64_t *bit)