lib/ftl: use per-io_channel write buffers
Replaced the single global write buffer with per-io_channel write buffers.
This means that the "rwb" module and all of its references were removed and
replaced with the recently added interfaces.

Change-Id: Idc899d3a4d63a8a2bede1ac26549ed06e9a2e784
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/909
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Parent: 925cc3b8a9
Commit: 336505abbf
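For orientation, the sketch below models the data path this change introduces: each io_channel owns its own pool of write buffer entries; a user write pops an entry from the channel's free queue, copies one block into it and pushes it onto the channel's submit queue; the core thread later gathers entries from the submit queues into xfer_size-sized batches, issues the write and returns the entries to their free queues. This is a minimal sketch under those assumptions, not the SPDK code itself — the real queues are spdk_ring objects with DMA-able payload memory and per-entry locks, and the names that do not appear in the diff below (queue_t, io_channel, channel_write_block, NUM_ENTRIES) are hypothetical stand-ins.

/*
 * Minimal, self-contained model of the per-io_channel write buffer flow.
 * This is NOT the SPDK implementation: the real code keeps the queues in
 * spdk_ring objects and uses DMA-able payload memory plus per-entry locks.
 * queue_t, io_channel and channel_write_block are illustrative names only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE  4096
#define NUM_ENTRIES 8   /* stands in for write_buffer_size / FTL_BLOCK_SIZE */

struct wbuf_entry {
	unsigned int index;             /* position within the channel's buffer */
	char payload[BLOCK_SIZE];
};

/* Trivial FIFO standing in for an spdk_ring. */
struct queue_t {
	struct wbuf_entry *items[NUM_ENTRIES];
	size_t head, tail, count;
};

static bool
queue_push(struct queue_t *q, struct wbuf_entry *e)
{
	if (q->count == NUM_ENTRIES) {
		return false;
	}
	q->items[q->tail] = e;
	q->tail = (q->tail + 1) % NUM_ENTRIES;
	q->count++;
	return true;
}

static struct wbuf_entry *
queue_pop(struct queue_t *q)
{
	struct wbuf_entry *e;

	if (q->count == 0) {
		return NULL;
	}
	e = q->items[q->head];
	q->head = (q->head + 1) % NUM_ENTRIES;
	q->count--;
	return e;
}

/* Per-io_channel state: every channel owns its own pool of buffer entries. */
struct io_channel {
	struct wbuf_entry entries[NUM_ENTRIES];
	struct queue_t free_queue;      /* entries available for new writes */
	struct queue_t submit_queue;    /* filled entries awaiting a batch */
};

static void
channel_init(struct io_channel *ch)
{
	memset(ch, 0, sizeof(*ch));
	for (unsigned int i = 0; i < NUM_ENTRIES; i++) {
		ch->entries[i].index = i;
		queue_push(&ch->free_queue, &ch->entries[i]);
	}
}

/* Roughly mirrors ftl_wbuf_fill(): take an entry from the channel's free
 * queue, copy one block into it and pass it to the core thread via the
 * submit queue. */
static bool
channel_write_block(struct io_channel *ch, const void *data)
{
	struct wbuf_entry *e = queue_pop(&ch->free_queue);

	if (e == NULL) {
		return false;   /* buffer full - the caller retries later */
	}
	memcpy(e->payload, data, BLOCK_SIZE);
	return queue_push(&ch->submit_queue, e);
}

int
main(void)
{
	static struct io_channel ch;
	char block[BLOCK_SIZE] = { 0 };
	struct wbuf_entry *e;

	channel_init(&ch);
	channel_write_block(&ch, block);

	/* The core thread would now drain the submit queues of all channels into
	 * an xfer_size-sized batch, issue the write and recycle the entries. */
	e = queue_pop(&ch.submit_queue);
	printf("submitted entry %u\n", e->index);
	queue_push(&ch.free_queue, e);
	return 0;
}

One consequence visible further down in the diff is that write throttling also becomes per channel: ftl_apply_limits() now scales each channel's qdepth_limit instead of adjusting limits on the shared rwb.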
@@ -66,8 +66,8 @@ struct spdk_ftl_conf {
 	/* Number of reserved addresses not exposed to the user */
 	size_t lba_rsvd;

-	/* Write buffer size */
-	size_t rwb_size;
+	/* Size of the per-io_channel write buffer */
+	size_t write_buffer_size;

 	/* Threshold for opening new band */
 	size_t band_thld;
@@ -87,9 +87,6 @@ struct spdk_ftl_conf {
 	/* User writes limits */
 	struct spdk_ftl_limit limits[SPDK_FTL_LIMIT_MAX];

-	/* Number of interleaving units per ws_opt */
-	size_t num_interleave_units;
-
 	/* Allow for partial recovery from open bands instead of returning error */
 	bool allow_open_bands;
@@ -34,7 +34,7 @@
 SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
 include $(SPDK_ROOT_DIR)/mk/spdk.common.mk

-C_SRCS = ftl_band.c ftl_core.c ftl_debug.c ftl_io.c ftl_rwb.c ftl_reloc.c \
+C_SRCS = ftl_band.c ftl_core.c ftl_debug.c ftl_io.c ftl_reloc.c \
	ftl_restore.c ftl_init.c ftl_trace.c

 LIBNAME = ftl
@@ -698,7 +698,6 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_addr addr,
 	struct ftl_io_init_opts opts = {
 		.dev = dev,
 		.io = NULL,
-		.rwb_batch = NULL,
 		.band = band,
 		.size = sizeof(*io),
 		.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
@@ -734,7 +733,6 @@ ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
 	struct ftl_io_init_opts opts = {
 		.dev = dev,
 		.io = NULL,
-		.rwb_batch = NULL,
 		.band = band,
 		.size = sizeof(struct ftl_io),
 		.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
@@ -44,7 +44,6 @@
 #include "ftl_core.h"
 #include "ftl_band.h"
 #include "ftl_io.h"
-#include "ftl_rwb.h"
 #include "ftl_debug.h"
 #include "ftl_reloc.h"
@@ -116,19 +115,6 @@ struct ftl_flush {
 	LIST_ENTRY(ftl_flush) list_entry;
 };

-static int
-ftl_rwb_flags_from_io(const struct ftl_io *io)
-{
-	int valid_flags = FTL_IO_INTERNAL | FTL_IO_WEAK | FTL_IO_PAD;
-	return io->flags & valid_flags;
-}
-
-static int
-ftl_rwb_entry_weak(const struct ftl_rwb_entry *entry)
-{
-	return entry->flags & FTL_IO_WEAK;
-}
-
 static void
 ftl_wptr_free(struct ftl_wptr *wptr)
 {
@@ -160,9 +146,9 @@ ftl_remove_wptr(struct ftl_wptr *wptr)
 	ftl_wptr_free(wptr);
 }

-struct ftl_wbuf_entry *ftl_acquire_wbuf_entry(struct ftl_io_channel *io_channel, int io_flags);
-
-struct ftl_wbuf_entry *
+static void ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_wbuf_entry *entry);
+
+static struct ftl_wbuf_entry *
 ftl_acquire_wbuf_entry(struct ftl_io_channel *io_channel, int io_flags)
 {
 	struct ftl_wbuf_entry *entry;
@@ -184,6 +170,8 @@ ftl_acquire_wbuf_entry(struct ftl_io_channel *io_channel, int io_flags)
 		return NULL;
 	}

+	ftl_evict_cache_entry(io_channel->dev, entry);
+
 	entry->io_flags = io_flags;
 	entry->addr.offset = FTL_ADDR_INVALID;
 	entry->lba = FTL_LBA_INVALID;
@@ -205,9 +193,7 @@ ftl_release_wbuf_entry(struct ftl_wbuf_entry *entry)
 	spdk_ring_enqueue(io_channel->free_queue, (void **)&entry, 1, NULL);
 }

-struct ftl_batch *ftl_get_next_batch(struct spdk_ftl_dev *dev);
-
-struct ftl_batch *
+static struct ftl_batch *
 ftl_get_next_batch(struct spdk_ftl_dev *dev)
 {
 	struct ftl_batch *batch = dev->current_batch;
@@ -289,9 +275,7 @@ ftl_get_next_batch(struct spdk_ftl_dev *dev)
 	return batch;
 }

-void ftl_release_batch(struct spdk_ftl_dev *dev, struct ftl_batch *batch);
-
-void
+static void
 ftl_release_batch(struct spdk_ftl_dev *dev, struct ftl_batch *batch)
 {
 	struct ftl_wbuf_entry *entry;
@@ -306,9 +290,7 @@ ftl_release_batch(struct spdk_ftl_dev *dev, struct ftl_batch *batch)
 	TAILQ_INSERT_TAIL(&dev->free_batches, batch, tailq);
 }

-struct ftl_wbuf_entry *ftl_get_entry_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
-
-struct ftl_wbuf_entry *
+static struct ftl_wbuf_entry *
 ftl_get_entry_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr)
 {
 	struct ftl_io_channel *ioch;
@@ -325,9 +307,7 @@ ftl_get_entry_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr)
 	return &ioch->wbuf_entries[entry_offset];
 }

-struct ftl_addr ftl_get_addr_from_entry(struct ftl_wbuf_entry *entry);
-
-struct ftl_addr
+static struct ftl_addr
 ftl_get_addr_from_entry(struct ftl_wbuf_entry *entry)
 {
 	struct ftl_io_channel *ioch = entry->ioch;
@@ -911,7 +891,7 @@ ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
 }

 static bool
-ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
+ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_wbuf_entry *entry)
 {
 	struct ftl_addr addr;
@@ -921,7 +901,7 @@ ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
 	}

 	addr = ftl_l2p_get(dev, entry->lba);
-	if (!(ftl_addr_cached(addr) && addr.cache_offset == entry->pos)) {
+	if (!(ftl_addr_cached(addr) && entry == ftl_get_entry_from_addr(dev, addr))) {
 		return false;
 	}
@@ -929,11 +909,11 @@ ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
 }

 static void
-ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
+ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_wbuf_entry *entry)
 {
 	pthread_spin_lock(&entry->lock);

-	if (!ftl_rwb_entry_valid(entry)) {
+	if (!entry->valid) {
 		goto unlock;
 	}
@ -946,43 +926,31 @@ ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
|
||||
|
||||
ftl_l2p_set(dev, entry->lba, entry->addr);
|
||||
clear:
|
||||
ftl_rwb_entry_invalidate(entry);
|
||||
entry->valid = false;
|
||||
unlock:
|
||||
pthread_spin_unlock(&entry->lock);
|
||||
}
|
||||
|
||||
static struct ftl_rwb_entry *
|
||||
ftl_acquire_entry(struct spdk_ftl_dev *dev, int flags)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
|
||||
entry = ftl_rwb_acquire(dev->rwb, ftl_rwb_type_from_flags(flags));
|
||||
if (!entry) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ftl_evict_cache_entry(dev, entry);
|
||||
|
||||
entry->flags = flags;
|
||||
return entry;
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_rwb_pad(struct spdk_ftl_dev *dev, size_t size)
|
||||
ftl_pad_wbuf(struct spdk_ftl_dev *dev, size_t size)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_wbuf_entry *entry;
|
||||
struct ftl_io_channel *ioch;
|
||||
int flags = FTL_IO_PAD | FTL_IO_INTERNAL;
|
||||
|
||||
ioch = ftl_io_channel_get_ctx(ftl_get_io_channel(dev));
|
||||
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
entry = ftl_acquire_entry(dev, flags);
|
||||
entry = ftl_acquire_wbuf_entry(ioch, flags);
|
||||
if (!entry) {
|
||||
break;
|
||||
}
|
||||
|
||||
entry->lba = FTL_LBA_INVALID;
|
||||
entry->addr = ftl_to_addr(FTL_ADDR_INVALID);
|
||||
memset(entry->data, 0, FTL_BLOCK_SIZE);
|
||||
ftl_rwb_push(entry);
|
||||
memset(entry->payload, 0, FTL_BLOCK_SIZE);
|
||||
|
||||
spdk_ring_enqueue(ioch->submit_queue, (void **)&entry, 1, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1000,37 +968,45 @@ static void
|
||||
ftl_wptr_pad_band(struct ftl_wptr *wptr)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = wptr->dev;
|
||||
size_t size = ftl_rwb_num_pending(dev->rwb);
|
||||
size_t blocks_left, rwb_size, pad_size;
|
||||
struct ftl_batch *batch = dev->current_batch;
|
||||
struct ftl_io_channel *ioch;
|
||||
size_t size, pad_size, blocks_left;
|
||||
|
||||
size = batch != NULL ? batch->num_entries : 0;
|
||||
TAILQ_FOREACH(ioch, &dev->ioch_queue, tailq) {
|
||||
size += spdk_ring_count(ioch->submit_queue);
|
||||
}
|
||||
|
||||
ioch = ftl_io_channel_get_ctx(ftl_get_io_channel(dev));
|
||||
|
||||
blocks_left = ftl_wptr_user_blocks_left(wptr);
|
||||
assert(size <= blocks_left);
|
||||
assert(blocks_left % dev->xfer_size == 0);
|
||||
rwb_size = ftl_rwb_size(dev->rwb) - size;
|
||||
pad_size = spdk_min(blocks_left - size, rwb_size);
|
||||
pad_size = spdk_min(blocks_left - size, spdk_ring_count(ioch->free_queue));
|
||||
|
||||
/* Pad write buffer until band is full */
|
||||
ftl_rwb_pad(dev, pad_size);
|
||||
ftl_pad_wbuf(dev, pad_size);
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = wptr->dev;
|
||||
size_t size = ftl_rwb_num_pending(dev->rwb);
|
||||
size_t num_active = dev->xfer_size * ftl_rwb_get_active_batches(dev->rwb);
|
||||
struct ftl_batch *batch = dev->current_batch;
|
||||
struct ftl_io_channel *ioch;
|
||||
size_t size;
|
||||
|
||||
num_active = num_active ? num_active : dev->xfer_size;
|
||||
if (size >= num_active) {
|
||||
size = batch != NULL ? batch->num_entries : 0;
|
||||
TAILQ_FOREACH(ioch, &dev->ioch_queue, tailq) {
|
||||
size += spdk_ring_count(ioch->submit_queue);
|
||||
}
|
||||
|
||||
if (size >= dev->xfer_size) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* If we reach this point we need to remove free bands */
|
||||
/* and pad current wptr band to the end */
|
||||
if (ftl_rwb_get_active_batches(dev->rwb) <= 1) {
|
||||
ftl_remove_free_bands(dev);
|
||||
}
|
||||
|
||||
ftl_remove_free_bands(dev);
|
||||
ftl_wptr_pad_band(wptr);
|
||||
}
|
||||
|
||||
@ -1048,12 +1024,11 @@ void
|
||||
ftl_apply_limits(struct spdk_ftl_dev *dev)
|
||||
{
|
||||
const struct spdk_ftl_limit *limit;
|
||||
struct ftl_io_channel *ioch;
|
||||
struct ftl_stats *stats = &dev->stats;
|
||||
size_t rwb_limit[FTL_RWB_TYPE_MAX];
|
||||
uint32_t qdepth_limit = 100;
|
||||
int i;
|
||||
|
||||
ftl_rwb_get_limits(dev->rwb, rwb_limit);
|
||||
|
||||
/* Clear existing limit */
|
||||
dev->limit = SPDK_FTL_LIMIT_MAX;
|
||||
|
||||
@ -1061,19 +1036,18 @@ ftl_apply_limits(struct spdk_ftl_dev *dev)
|
||||
limit = ftl_get_limit(dev, i);
|
||||
|
||||
if (dev->num_free <= limit->thld) {
|
||||
rwb_limit[FTL_RWB_TYPE_USER] =
|
||||
(limit->limit * ftl_rwb_entry_cnt(dev->rwb)) / 100;
|
||||
qdepth_limit = limit->limit;
|
||||
stats->limits[i]++;
|
||||
dev->limit = i;
|
||||
goto apply;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Clear the limits, since we don't need to apply them anymore */
|
||||
rwb_limit[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(dev->rwb);
|
||||
apply:
|
||||
ftl_trace_limits(dev, rwb_limit, dev->num_free);
|
||||
ftl_rwb_set_limits(dev->rwb, rwb_limit);
|
||||
ftl_trace_limits(dev, dev->limit, dev->num_free);
|
||||
TAILQ_FOREACH(ioch, &dev->ioch_queue, tailq) {
|
||||
__atomic_store_n(&ioch->qdepth_limit, (qdepth_limit * ioch->num_entries) / 100,
|
||||
__ATOMIC_SEQ_CST);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1129,12 +1103,11 @@ static int
|
||||
ftl_cache_read(struct ftl_io *io, uint64_t lba,
|
||||
struct ftl_addr addr, void *buf)
|
||||
{
|
||||
struct ftl_rwb *rwb = io->dev->rwb;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_wbuf_entry *entry;
|
||||
struct ftl_addr naddr;
|
||||
int rc = 0;
|
||||
|
||||
entry = ftl_rwb_entry_from_offset(rwb, addr.cache_offset);
|
||||
entry = ftl_get_entry_from_addr(io->dev, addr);
|
||||
pthread_spin_lock(&entry->lock);
|
||||
|
||||
naddr = ftl_l2p_get(io->dev, lba);
|
||||
@ -1143,7 +1116,7 @@ ftl_cache_read(struct ftl_io *io, uint64_t lba,
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(buf, entry->data, FTL_BLOCK_SIZE);
|
||||
memcpy(buf, entry->payload, FTL_BLOCK_SIZE);
|
||||
out:
|
||||
pthread_spin_unlock(&entry->lock);
|
||||
return rc;
|
||||
@ -1211,7 +1184,7 @@ ftl_submit_read(struct ftl_io *io)
|
||||
|
||||
/* We might need to retry the read from scratch (e.g. */
|
||||
/* because write was under way and completed before */
|
||||
/* we could read it from rwb */
|
||||
/* we could read it from the write buffer */
|
||||
if (ftl_read_retry(rc)) {
|
||||
continue;
|
||||
}
|
||||
@ -1268,13 +1241,13 @@ ftl_complete_flush(struct ftl_flush *flush)
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_process_flush(struct spdk_ftl_dev *dev, struct ftl_rwb_batch *batch)
|
||||
ftl_process_flush(struct spdk_ftl_dev *dev, struct ftl_batch *batch)
|
||||
{
|
||||
struct ftl_flush *flush, *tflush;
|
||||
size_t offset;
|
||||
|
||||
LIST_FOREACH_SAFE(flush, &dev->flush_list, list_entry, tflush) {
|
||||
offset = ftl_rwb_batch_get_offset(batch);
|
||||
offset = batch->index;
|
||||
|
||||
if (spdk_bit_array_get(flush->bmap, offset)) {
|
||||
spdk_bit_array_clear(flush->bmap, offset);
|
||||
@ -1541,13 +1514,13 @@ ftl_nv_cache_scrub(struct ftl_nv_cache *nv_cache, spdk_bdev_io_completion_cb cb_
|
||||
static void
|
||||
ftl_write_fail(struct ftl_io *io, int status)
|
||||
{
|
||||
struct ftl_rwb_batch *batch = io->rwb_batch;
|
||||
struct ftl_batch *batch = io->batch;
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_wbuf_entry *entry;
|
||||
struct ftl_band *band;
|
||||
char buf[128];
|
||||
|
||||
entry = ftl_rwb_batch_first_entry(batch);
|
||||
entry = TAILQ_FIRST(&batch->entries);
|
||||
|
||||
band = ftl_band_from_addr(io->dev, entry->addr);
|
||||
SPDK_ERRLOG("Write failed @addr: %s, status: %d\n",
|
||||
@ -1556,21 +1529,21 @@ ftl_write_fail(struct ftl_io *io, int status)
|
||||
/* Close the band and, halt wptr and defrag */
|
||||
ftl_halt_writes(dev, band);
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
TAILQ_FOREACH(entry, &batch->entries, tailq) {
|
||||
/* Invalidate meta set by process_writes() */
|
||||
ftl_invalidate_addr(dev, entry->addr);
|
||||
}
|
||||
|
||||
/* Reset the batch back to the the RWB to resend it later */
|
||||
ftl_rwb_batch_revert(batch);
|
||||
/* Reset the batch back to the write buffer to resend it later */
|
||||
TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
struct ftl_rwb_batch *batch = io->rwb_batch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_batch *batch = io->batch;
|
||||
struct ftl_wbuf_entry *entry;
|
||||
struct ftl_band *band;
|
||||
struct ftl_addr prev_addr, addr = io->addr;
|
||||
|
||||
@ -1582,9 +1555,9 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
assert(io->num_blocks == dev->xfer_size);
|
||||
assert(!(io->flags & FTL_IO_MD));
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
TAILQ_FOREACH(entry, &batch->entries, tailq) {
|
||||
band = entry->band;
|
||||
if (!(entry->flags & FTL_IO_PAD)) {
|
||||
if (!(entry->io_flags & FTL_IO_PAD)) {
|
||||
/* Verify that the LBA is set for user blocks */
|
||||
assert(entry->lba != FTL_LBA_INVALID);
|
||||
}
|
||||
@ -1600,12 +1573,13 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
prev_addr = ftl_l2p_get(dev, entry->lba);
|
||||
|
||||
/* If the l2p was updated in the meantime, don't update band's metadata */
|
||||
if (ftl_addr_cached(prev_addr) && prev_addr.cache_offset == entry->pos) {
|
||||
if (ftl_addr_cached(prev_addr) &&
|
||||
entry == ftl_get_entry_from_addr(dev, prev_addr)) {
|
||||
/* Setting entry's cache bit needs to be done after metadata */
|
||||
/* within the band is updated to make sure that writes */
|
||||
/* invalidating the entry clear the metadata as well */
|
||||
ftl_band_set_addr(io->band, entry->lba, entry->addr);
|
||||
ftl_rwb_entry_set_valid(entry);
|
||||
entry->valid = true;
|
||||
}
|
||||
pthread_spin_unlock(&entry->lock);
|
||||
}
|
||||
@ -1617,24 +1591,24 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
|
||||
}
|
||||
|
||||
ftl_process_flush(dev, batch);
|
||||
ftl_rwb_batch_release(batch);
|
||||
ftl_release_batch(dev, batch);
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_update_rwb_stats(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry)
|
||||
ftl_update_stats(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry)
|
||||
{
|
||||
if (!ftl_rwb_entry_internal(entry)) {
|
||||
if (entry->io_flags & FTL_IO_INTERNAL) {
|
||||
dev->stats.write_user++;
|
||||
}
|
||||
dev->stats.write_total++;
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
|
||||
ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry,
|
||||
struct ftl_addr addr)
|
||||
{
|
||||
struct ftl_addr prev_addr;
|
||||
struct ftl_rwb_entry *prev;
|
||||
struct ftl_wbuf_entry *prev;
|
||||
struct ftl_band *band;
|
||||
int valid;
|
||||
|
||||
@ -1646,13 +1620,13 @@ ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
|
||||
|
||||
/* If the L2P's physical address is different than what we expected we don't need to */
|
||||
/* do anything (someone's already overwritten our data). */
|
||||
if (ftl_rwb_entry_weak(entry) && !ftl_addr_cmp(prev_addr, entry->addr)) {
|
||||
if ((entry->io_flags & FTL_IO_WEAK) && !ftl_addr_cmp(prev_addr, entry->addr)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (ftl_addr_cached(prev_addr)) {
|
||||
assert(!ftl_rwb_entry_weak(entry));
|
||||
prev = ftl_rwb_entry_from_offset(dev->rwb, prev_addr.cache_offset);
|
||||
assert(!(entry->io_flags & FTL_IO_WEAK));
|
||||
prev = ftl_get_entry_from_addr(dev, prev_addr);
|
||||
pthread_spin_lock(&prev->lock);
|
||||
|
||||
/* Re-read the L2P under the lock to protect against updates */
|
||||
@ -1666,9 +1640,9 @@ ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
|
||||
}
|
||||
|
||||
/* If previous entry is part of cache, remove and invalidate it */
|
||||
if (ftl_rwb_entry_valid(prev)) {
|
||||
if (prev->valid) {
|
||||
ftl_invalidate_addr(dev, prev->addr);
|
||||
ftl_rwb_entry_invalidate(prev);
|
||||
prev->valid = false;
|
||||
}
|
||||
|
||||
ftl_l2p_set(dev, entry->lba, addr);
|
||||
@ -1686,7 +1660,7 @@ ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
|
||||
|
||||
/* If the address has been invalidated already, we don't want to update */
|
||||
/* the L2P for weak writes, as it means the write is no longer valid. */
|
||||
if (!ftl_rwb_entry_weak(entry) || valid) {
|
||||
if (!(entry->io_flags & FTL_IO_WEAK) || valid) {
|
||||
ftl_l2p_set(dev, entry->lba, addr);
|
||||
}
|
||||
|
||||
@ -1702,7 +1676,6 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_addr addr, ftl_io_fn c
|
||||
.dev = dev,
|
||||
.io = NULL,
|
||||
.parent = parent,
|
||||
.rwb_batch = NULL,
|
||||
.band = parent->band,
|
||||
.size = sizeof(struct ftl_io),
|
||||
.flags = 0,
|
||||
@ -1804,8 +1777,6 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
|
||||
int rc = 0;
|
||||
|
||||
assert(io->num_blocks % dev->xfer_size == 0);
|
||||
/* Only one child write make sense in case of user write */
|
||||
assert((io->flags & FTL_IO_MD) || io->iov_cnt == 1);
|
||||
|
||||
while (io->iov_pos < io->iov_cnt) {
|
||||
/* There are no guarantees of the order of completion of NVMe IO submission queue */
|
||||
@ -1841,31 +1812,43 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
|
||||
static void
|
||||
ftl_flush_pad_batch(struct spdk_ftl_dev *dev)
|
||||
{
|
||||
struct ftl_rwb *rwb = dev->rwb;
|
||||
size_t size, num_entries;
|
||||
struct ftl_batch *batch = dev->current_batch;
|
||||
struct ftl_io_channel *ioch;
|
||||
size_t size = 0, num_entries = 0;
|
||||
|
||||
size = ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_INTERNAL) +
|
||||
ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_USER);
|
||||
assert(batch != NULL);
|
||||
assert(batch->num_entries < dev->xfer_size);
|
||||
|
||||
/* There must be something in the RWB, otherwise the flush */
|
||||
/* wouldn't be waiting for anything */
|
||||
assert(size > 0);
|
||||
|
||||
/* Only add padding when there's less than xfer size */
|
||||
/* entries in the buffer. Otherwise we just have to wait */
|
||||
/* for the entries to become ready. */
|
||||
num_entries = ftl_rwb_get_active_batches(dev->rwb) * dev->xfer_size;
|
||||
if (size < num_entries) {
|
||||
ftl_rwb_pad(dev, num_entries - (size % num_entries));
|
||||
TAILQ_FOREACH(ioch, &dev->ioch_queue, tailq) {
|
||||
size += spdk_ring_count(ioch->submit_queue);
|
||||
}
|
||||
|
||||
num_entries = dev->xfer_size - batch->num_entries;
|
||||
if (size < num_entries) {
|
||||
ftl_pad_wbuf(dev, num_entries - size);
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
ftl_check_io_channel_flush(struct spdk_ftl_dev *dev)
|
||||
{
|
||||
struct ftl_io_channel *ioch;
|
||||
|
||||
TAILQ_FOREACH(ioch, &dev->ioch_queue, tailq) {
|
||||
if (ioch->flush && spdk_ring_count(ioch->free_queue) != ioch->num_entries) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_wptr_process_writes(struct ftl_wptr *wptr)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = wptr->dev;
|
||||
struct ftl_rwb_batch *batch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_batch *batch;
|
||||
struct ftl_wbuf_entry *entry;
|
||||
struct ftl_io *io;
|
||||
|
||||
if (spdk_unlikely(!TAILQ_EMPTY(&wptr->pending_queue))) {
|
||||
@ -1890,33 +1873,33 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
|
||||
ftl_wptr_pad_band(wptr);
|
||||
}
|
||||
|
||||
batch = ftl_rwb_pop(dev->rwb);
|
||||
batch = ftl_get_next_batch(dev);
|
||||
if (!batch) {
|
||||
/* If there are queued flush requests we need to pad the RWB to */
|
||||
/* If there are queued flush requests we need to pad the write buffer to */
|
||||
/* force out remaining entries */
|
||||
if (!LIST_EMPTY(&dev->flush_list)) {
|
||||
if (!LIST_EMPTY(&dev->flush_list) || ftl_check_io_channel_flush(dev)) {
|
||||
ftl_flush_pad_batch(dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
io = ftl_io_rwb_init(dev, wptr->addr, wptr->band, batch, ftl_write_cb);
|
||||
io = ftl_io_wbuf_init(dev, wptr->addr, wptr->band, batch, ftl_write_cb);
|
||||
if (!io) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
TAILQ_FOREACH(entry, &batch->entries, tailq) {
|
||||
/* Update band's relocation stats if the IO comes from reloc */
|
||||
if (entry->flags & FTL_IO_WEAK) {
|
||||
if (entry->io_flags & FTL_IO_WEAK) {
|
||||
if (!spdk_bit_array_get(wptr->band->reloc_bitmap, entry->band->id)) {
|
||||
spdk_bit_array_set(wptr->band->reloc_bitmap, entry->band->id);
|
||||
entry->band->num_reloc_bands++;
|
||||
}
|
||||
}
|
||||
|
||||
ftl_trace_rwb_pop(dev, entry);
|
||||
ftl_update_rwb_stats(dev, entry);
|
||||
ftl_trace_wbuf_pop(dev, entry);
|
||||
ftl_update_stats(dev, entry);
|
||||
}
|
||||
|
||||
SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write addr:%lx\n", wptr->addr.offset);
|
||||
@ -1931,7 +1914,7 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
|
||||
|
||||
return dev->xfer_size;
|
||||
error:
|
||||
ftl_rwb_batch_revert(batch);
|
||||
TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1961,11 +1944,11 @@ ftl_process_writes(struct spdk_ftl_dev *dev)
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
|
||||
ftl_fill_wbuf_entry(struct ftl_wbuf_entry *entry, struct ftl_io *io)
|
||||
{
|
||||
memcpy(entry->data, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);
|
||||
memcpy(entry->payload, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);
|
||||
|
||||
if (ftl_rwb_entry_weak(entry)) {
|
||||
if (entry->io_flags & FTL_IO_WEAK) {
|
||||
entry->band = ftl_band_from_addr(io->dev, io->addr);
|
||||
entry->addr = ftl_band_next_addr(entry->band, io->addr, io->pos);
|
||||
entry->band->num_reloc_blocks++;
|
||||
@ -1973,20 +1956,14 @@ ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
|
||||
|
||||
entry->trace = io->trace;
|
||||
entry->lba = ftl_io_current_lba(io);
|
||||
|
||||
if (entry->md) {
|
||||
memcpy(entry->md, &entry->lba, sizeof(entry->lba));
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_rwb_fill(struct ftl_io *io)
|
||||
ftl_wbuf_fill(struct ftl_io *io)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
struct ftl_io_channel *ioch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_addr addr = { .cached = 1 };
|
||||
int flags = ftl_rwb_flags_from_io(io);
|
||||
struct ftl_wbuf_entry *entry;
|
||||
|
||||
ioch = ftl_io_channel_get_ctx(io->ioch);
|
||||
|
||||
@ -1996,24 +1973,22 @@ ftl_rwb_fill(struct ftl_io *io)
|
||||
continue;
|
||||
}
|
||||
|
||||
entry = ftl_acquire_entry(dev, flags);
|
||||
entry = ftl_acquire_wbuf_entry(ioch, io->flags);
|
||||
if (!entry) {
|
||||
TAILQ_INSERT_TAIL(&ioch->retry_queue, io, ioch_entry);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ftl_rwb_entry_fill(entry, io);
|
||||
ftl_fill_wbuf_entry(entry, io);
|
||||
|
||||
addr.cache_offset = entry->pos;
|
||||
|
||||
ftl_trace_rwb_fill(dev, io);
|
||||
ftl_update_l2p(dev, entry, addr);
|
||||
ftl_trace_wbuf_fill(dev, io);
|
||||
ftl_update_l2p(dev, entry, ftl_get_addr_from_entry(entry));
|
||||
ftl_io_advance(io, 1);
|
||||
|
||||
/* Needs to be done after L2P is updated to avoid race with */
|
||||
/* write completion callback when it's processed faster than */
|
||||
/* L2P is set in update_l2p(). */
|
||||
ftl_rwb_push(entry);
|
||||
spdk_ring_enqueue(ioch->submit_queue, (void **)&entry, 1, NULL);
|
||||
}
|
||||
|
||||
if (ftl_io_done(io)) {
|
||||
@ -2170,9 +2145,9 @@ ftl_io_write(struct ftl_io *io)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = io->dev;
|
||||
|
||||
/* For normal IOs we just need to copy the data onto the rwb */
|
||||
/* For normal IOs we just need to copy the data onto the write buffer */
|
||||
if (!(io->flags & FTL_IO_MD)) {
|
||||
ftl_io_call_foreach_child(io, ftl_rwb_fill);
|
||||
ftl_io_call_foreach_child(io, ftl_wbuf_fill);
|
||||
} else {
|
||||
/* Metadata has its own buffer, so it doesn't have to be copied, so just */
|
||||
/* send it the the core thread and schedule the write immediately */
|
||||
@ -2257,14 +2232,13 @@ static struct ftl_flush *
|
||||
ftl_flush_init(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
|
||||
{
|
||||
struct ftl_flush *flush;
|
||||
struct ftl_rwb *rwb = dev->rwb;
|
||||
|
||||
flush = calloc(1, sizeof(*flush));
|
||||
if (!flush) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
flush->bmap = spdk_bit_array_create(ftl_rwb_num_batches(rwb));
|
||||
flush->bmap = spdk_bit_array_create(FTL_BATCH_COUNT);
|
||||
if (!flush->bmap) {
|
||||
goto error;
|
||||
}
|
||||
@ -2284,27 +2258,26 @@ _ftl_flush(void *ctx)
|
||||
{
|
||||
struct ftl_flush *flush = ctx;
|
||||
struct spdk_ftl_dev *dev = flush->dev;
|
||||
struct ftl_rwb *rwb = dev->rwb;
|
||||
struct ftl_rwb_batch *batch;
|
||||
uint32_t i;
|
||||
|
||||
/* Attach flush object to all non-empty batches */
|
||||
ftl_rwb_foreach_batch(batch, rwb) {
|
||||
if (!ftl_rwb_batch_empty(batch)) {
|
||||
spdk_bit_array_set(flush->bmap, ftl_rwb_batch_get_offset(batch));
|
||||
for (i = 0; i < FTL_BATCH_COUNT; ++i) {
|
||||
if (dev->batch_array[i].num_entries > 0) {
|
||||
spdk_bit_array_set(flush->bmap, i);
|
||||
flush->num_req++;
|
||||
}
|
||||
}
|
||||
|
||||
LIST_INSERT_HEAD(&dev->flush_list, flush, list_entry);
|
||||
|
||||
/* If the RWB was already empty, the flush can be completed right away */
|
||||
/* If the write buffer was already empty, the flush can be completed right away */
|
||||
if (!flush->num_req) {
|
||||
ftl_complete_flush(flush);
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
ftl_flush_rwb(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
|
||||
ftl_flush_wbuf(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
|
||||
{
|
||||
struct ftl_flush *flush;
|
||||
|
||||
@ -2324,7 +2297,7 @@ spdk_ftl_flush(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return ftl_flush_rwb(dev, cb_fn, cb_arg);
|
||||
return ftl_flush_wbuf(dev, cb_fn, cb_arg);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -193,8 +193,6 @@ struct spdk_ftl_dev {
|
||||
|
||||
/* Transfer unit size */
|
||||
size_t xfer_size;
|
||||
/* Ring write buffer */
|
||||
struct ftl_rwb *rwb;
|
||||
|
||||
/* Current user write limit */
|
||||
int limit;
|
||||
@ -271,7 +269,7 @@ typedef void (*ftl_restore_fn)(struct ftl_restore *, int, void *cb_arg);
|
||||
void ftl_apply_limits(struct spdk_ftl_dev *dev);
|
||||
void ftl_io_read(struct ftl_io *io);
|
||||
void ftl_io_write(struct ftl_io *io);
|
||||
int ftl_flush_rwb(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
|
||||
int ftl_flush_wbuf(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
|
||||
int ftl_current_limit(const struct spdk_ftl_dev *dev);
|
||||
int ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
|
||||
int ftl_task_core(void *ctx);
|
||||
|
@ -37,7 +37,6 @@
|
||||
#include "ftl_addr.h"
|
||||
#include "ftl_band.h"
|
||||
#include "ftl_core.h"
|
||||
#include "ftl_rwb.h"
|
||||
|
||||
#if defined(DEBUG)
|
||||
/* Debug flags - enabled when defined */
|
||||
|
@ -46,7 +46,6 @@
|
||||
#include "ftl_core.h"
|
||||
#include "ftl_io.h"
|
||||
#include "ftl_reloc.h"
|
||||
#include "ftl_rwb.h"
|
||||
#include "ftl_band.h"
|
||||
#include "ftl_debug.h"
|
||||
|
||||
@ -102,8 +101,8 @@ static const struct spdk_ftl_conf g_default_conf = {
|
||||
.invalid_thld = 10,
|
||||
/* 20% spare blocks */
|
||||
.lba_rsvd = 20,
|
||||
/* 6M write buffer */
|
||||
.rwb_size = 6 * 1024 * 1024,
|
||||
/* 6M write buffer per each IO channel */
|
||||
.write_buffer_size = 6 * 1024 * 1024,
|
||||
/* 90% band fill threshold */
|
||||
.band_thld = 90,
|
||||
/* Max 32 IO depth per band relocate */
|
||||
@ -112,9 +111,6 @@ static const struct spdk_ftl_conf g_default_conf = {
|
||||
.max_active_relocs = 3,
|
||||
/* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
|
||||
.user_io_pool_size = 2048,
|
||||
/* Number of interleaving units per ws_opt */
|
||||
/* 1 for default and 3 for 3D TLC NAND */
|
||||
.num_interleave_units = 1,
|
||||
/*
|
||||
* If clear ftl will return error when restoring after a dirty shutdown
|
||||
* If set, last band will be padded, ftl will restore based only on closed bands - this
|
||||
@ -159,13 +155,10 @@ ftl_check_conf(const struct spdk_ftl_dev *dev, const struct spdk_ftl_conf *conf)
|
||||
if (conf->lba_rsvd == 0) {
|
||||
return -1;
|
||||
}
|
||||
if (conf->rwb_size == 0) {
|
||||
if (conf->write_buffer_size == 0) {
|
||||
return -1;
|
||||
}
|
||||
if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
|
||||
return -1;
|
||||
}
|
||||
if (dev->xfer_size % conf->num_interleave_units != 0) {
|
||||
if (conf->write_buffer_size % FTL_BLOCK_SIZE != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -975,7 +968,7 @@ ftl_io_channel_init_wbuf(struct ftl_io_channel *ioch)
|
||||
uint32_t i;
|
||||
int rc;
|
||||
|
||||
ioch->num_entries = dev->conf.rwb_size / FTL_BLOCK_SIZE;
|
||||
ioch->num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
|
||||
ioch->wbuf_entries = calloc(ioch->num_entries, sizeof(*ioch->wbuf_entries));
|
||||
if (ioch->wbuf_entries == NULL) {
|
||||
SPDK_ERRLOG("Failed to allocate write buffer entry array\n");
|
||||
@ -983,7 +976,7 @@ ftl_io_channel_init_wbuf(struct ftl_io_channel *ioch)
|
||||
}
|
||||
|
||||
ioch->qdepth_limit = ioch->num_entries;
|
||||
ioch->wbuf_payload = spdk_zmalloc(dev->conf.rwb_size, FTL_BLOCK_SIZE, NULL,
|
||||
ioch->wbuf_payload = spdk_zmalloc(dev->conf.write_buffer_size, FTL_BLOCK_SIZE, NULL,
|
||||
SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
|
||||
if (ioch->wbuf_payload == NULL) {
|
||||
SPDK_ERRLOG("Failed to allocate write buffer payload\n");
|
||||
@ -1189,6 +1182,9 @@ ftl_io_channel_destroy_cb(void *io_device, void *ctx)
|
||||
struct _ftl_io_channel *_ioch = ctx;
|
||||
struct ftl_io_channel *ioch = _ioch->ioch;
|
||||
|
||||
/* Mark the IO channel as being flush to force out any unwritten entries */
|
||||
ioch->flush = true;
|
||||
|
||||
_ftl_io_channel_destroy_cb(ioch);
|
||||
}
|
||||
|
||||
@ -1350,8 +1346,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
|
||||
pthread_mutex_unlock(&g_ftl_queue_lock);
|
||||
|
||||
assert(LIST_EMPTY(&dev->wptr_list));
|
||||
assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) == 0);
|
||||
assert(ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER) == 0);
|
||||
assert(dev->current_batch == NULL);
|
||||
|
||||
ftl_dev_dump_bands(dev);
|
||||
ftl_dev_dump_stats(dev);
|
||||
@ -1376,7 +1371,6 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
|
||||
}
|
||||
spdk_mempool_free(dev->lba_request_pool);
|
||||
|
||||
ftl_rwb_free(dev->rwb);
|
||||
ftl_reloc_free(dev->reloc);
|
||||
|
||||
ftl_release_bdev(dev->nv_cache.bdev_desc);
|
||||
@ -1470,12 +1464,6 @@ spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn c
|
||||
goto fail_sync;
|
||||
}
|
||||
|
||||
dev->rwb = ftl_rwb_init(&dev->conf, dev->xfer_size, dev->md_size, ftl_get_num_punits(dev));
|
||||
if (!dev->rwb) {
|
||||
SPDK_ERRLOG("Unable to initialize rwb structures\n");
|
||||
goto fail_sync;
|
||||
}
|
||||
|
||||
dev->reloc = ftl_reloc_init(dev);
|
||||
if (!dev->reloc) {
|
||||
SPDK_ERRLOG("Unable to initialize reloc structures\n");
|
||||
@ -1610,8 +1598,6 @@ _spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb_fn, void *cb_ar
|
||||
fini_ctx->cb_arg = cb_arg;
|
||||
fini_ctx->thread = thread;
|
||||
|
||||
ftl_rwb_disable_interleaving(dev->rwb);
|
||||
|
||||
spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, fini_ctx);
|
||||
return 0;
|
||||
}
|
||||
|
@ -38,7 +38,6 @@
|
||||
|
||||
#include "ftl_io.h"
|
||||
#include "ftl_core.h"
|
||||
#include "ftl_rwb.h"
|
||||
#include "ftl_band.h"
|
||||
#include "ftl_debug.h"
|
||||
|
||||
@ -243,7 +242,7 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
|
||||
ftl_io_clear(io);
|
||||
ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);
|
||||
|
||||
io->rwb_batch = opts->rwb_batch;
|
||||
io->batch = opts->batch;
|
||||
io->band = opts->band;
|
||||
io->md = opts->md;
|
||||
io->iov = &io->iov_buf[0];
|
||||
@ -281,30 +280,26 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
|
||||
}
|
||||
|
||||
struct ftl_io *
|
||||
ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
|
||||
struct ftl_rwb_batch *batch, ftl_io_fn cb)
|
||||
ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
|
||||
struct ftl_batch *batch, ftl_io_fn cb)
|
||||
{
|
||||
struct ftl_io *io;
|
||||
struct ftl_io_init_opts opts = {
|
||||
.dev = dev,
|
||||
.io = NULL,
|
||||
.rwb_batch = batch,
|
||||
.batch = batch,
|
||||
.band = band,
|
||||
.size = sizeof(struct ftl_io),
|
||||
.flags = 0,
|
||||
.type = FTL_IO_WRITE,
|
||||
.num_blocks = dev->xfer_size,
|
||||
.cb_fn = cb,
|
||||
.iovs = {
|
||||
{
|
||||
.iov_base = ftl_rwb_batch_get_data(batch),
|
||||
.iov_len = dev->xfer_size * FTL_BLOCK_SIZE,
|
||||
}
|
||||
},
|
||||
.iovcnt = 1,
|
||||
.md = ftl_rwb_batch_get_md(batch),
|
||||
.iovcnt = dev->xfer_size,
|
||||
.md = batch->metadata,
|
||||
};
|
||||
|
||||
memcpy(opts.iovs, batch->iov, sizeof(struct iovec) * dev->xfer_size);
|
||||
|
||||
io = ftl_io_init_internal(&opts);
|
||||
if (!io) {
|
||||
return NULL;
|
||||
@ -322,7 +317,6 @@ ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
|
||||
struct ftl_io_init_opts opts = {
|
||||
.dev = band->dev,
|
||||
.io = NULL,
|
||||
.rwb_batch = NULL,
|
||||
.band = band,
|
||||
.size = sizeof(struct ftl_io),
|
||||
.flags = FTL_IO_PHYSICAL_MODE,
|
||||
@ -505,7 +499,7 @@ ftl_io_clear(struct ftl_io *io)
|
||||
ftl_io_reset(io);
|
||||
|
||||
io->flags = 0;
|
||||
io->rwb_batch = NULL;
|
||||
io->batch = NULL;
|
||||
io->band = NULL;
|
||||
}
|
||||
|
||||
|
@ -42,8 +42,8 @@
|
||||
#include "ftl_trace.h"
|
||||
|
||||
struct spdk_ftl_dev;
|
||||
struct ftl_rwb_batch;
|
||||
struct ftl_band;
|
||||
struct ftl_batch;
|
||||
struct ftl_io;
|
||||
|
||||
typedef int (*ftl_md_pack_fn)(struct ftl_band *);
|
||||
@ -101,8 +101,8 @@ struct ftl_io_init_opts {
|
||||
/* IO type */
|
||||
enum ftl_io_type type;
|
||||
|
||||
/* RWB entry */
|
||||
struct ftl_rwb_batch *rwb_batch;
|
||||
/* Transfer batch, set for IO going through the write buffer */
|
||||
struct ftl_batch *batch;
|
||||
|
||||
/* Band to which the IO is directed */
|
||||
struct ftl_band *band;
|
||||
@ -190,6 +190,8 @@ struct ftl_io_channel {
|
||||
uint32_t qdepth_limit;
|
||||
/* Current number of concurrent user writes */
|
||||
uint32_t qdepth_current;
|
||||
/* Means that the IO channel is being flushed */
|
||||
bool flush;
|
||||
};
|
||||
|
||||
/* General IO descriptor */
|
||||
@ -235,8 +237,8 @@ struct ftl_io {
|
||||
/* Offset within the iovec (in blocks) */
|
||||
size_t iov_off;
|
||||
|
||||
/* RWB entry (valid only for RWB-based IO) */
|
||||
struct ftl_rwb_batch *rwb_batch;
|
||||
/* Transfer batch (valid only for writes going through the write buffer) */
|
||||
struct ftl_batch *batch;
|
||||
|
||||
/* Band this IO is being written to */
|
||||
struct ftl_band *band;
|
||||
@ -331,9 +333,8 @@ void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
|
||||
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
|
||||
void *ftl_io_iovec_addr(struct ftl_io *io);
|
||||
size_t ftl_io_iovec_len_left(struct ftl_io *io);
|
||||
struct ftl_io *ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
|
||||
struct ftl_band *band,
|
||||
struct ftl_rwb_batch *entry, ftl_io_fn cb);
|
||||
struct ftl_io *ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
|
||||
struct ftl_band *band, struct ftl_batch *batch, ftl_io_fn cb);
|
||||
struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb);
|
||||
struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
|
||||
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
|
||||
|
@ -38,7 +38,6 @@
|
||||
#include "ftl_reloc.h"
|
||||
#include "ftl_core.h"
|
||||
#include "ftl_io.h"
|
||||
#include "ftl_rwb.h"
|
||||
#include "ftl_band.h"
|
||||
#include "ftl_debug.h"
|
||||
|
||||
|
@ -547,7 +547,7 @@ ftl_nv_cache_band_flush_cb(void *ctx, int status)
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_nv_cache_rwb_flush_cb(void *ctx, int status)
|
||||
ftl_nv_cache_wbuf_flush_cb(void *ctx, int status)
|
||||
{
|
||||
struct ftl_nv_cache_restore *restore = ctx;
|
||||
struct ftl_nv_cache *nv_cache = restore->nv_cache;
|
||||
@ -590,7 +590,7 @@ ftl_nv_cache_recovery_done(struct ftl_nv_cache_restore *restore)
|
||||
range_current->start_addr < range_prev->last_addr)) {
|
||||
SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Non-volatile cache inconsistency detected\n");
|
||||
|
||||
rc = ftl_flush_rwb(dev, ftl_nv_cache_rwb_flush_cb, restore);
|
||||
rc = ftl_flush_wbuf(dev, ftl_nv_cache_wbuf_flush_cb, restore);
|
||||
if (spdk_unlikely(rc != 0)) {
|
||||
SPDK_ERRLOG("Unable to flush the write buffer: %s\n", spdk_strerror(-rc));
|
||||
ftl_nv_cache_restore_complete(restore, rc);
|
||||
@ -1134,7 +1134,6 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
|
||||
struct ftl_io_init_opts opts = {
|
||||
.dev = dev,
|
||||
.io = NULL,
|
||||
.rwb_batch = NULL,
|
||||
.band = band,
|
||||
.size = sizeof(struct ftl_io),
|
||||
.flags = flags,
|
||||
|
@ -1,581 +0,0 @@
|
||||
/*-
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (c) Intel Corporation.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "spdk/stdinc.h"
|
||||
#include "spdk/env.h"
|
||||
#include "spdk/util.h"
|
||||
|
||||
#include "ftl_rwb.h"
|
||||
#include "ftl_core.h"
|
||||
|
||||
struct ftl_rwb_batch {
|
||||
/* Parent RWB */
|
||||
struct ftl_rwb *rwb;
|
||||
|
||||
/* Position within RWB */
|
||||
unsigned int pos;
|
||||
|
||||
/* Number of acquired entries */
|
||||
unsigned int num_acquired;
|
||||
|
||||
/* Number of entries ready for submission */
|
||||
unsigned int num_ready;
|
||||
|
||||
/* RWB entry list */
|
||||
LIST_HEAD(, ftl_rwb_entry) entry_list;
|
||||
|
||||
/* Entry buffer */
|
||||
struct ftl_rwb_entry *entries;
|
||||
|
||||
/* Data buffer */
|
||||
void *buffer;
|
||||
|
||||
/* Metadata buffer */
|
||||
void *md_buffer;
|
||||
|
||||
/* Queue entry */
|
||||
STAILQ_ENTRY(ftl_rwb_batch) stailq;
|
||||
};
|
||||
|
||||
struct ftl_rwb {
|
||||
/* Number of batches */
|
||||
size_t num_batches;
|
||||
|
||||
/* Information for interleaving */
|
||||
size_t interleave_offset;
|
||||
/* Maximum number of active batches */
|
||||
size_t max_active_batches;
|
||||
|
||||
/* Number of entries per batch */
|
||||
size_t xfer_size;
|
||||
/* Metadata's size */
|
||||
size_t md_size;
|
||||
|
||||
/* Number of acquired entries */
|
||||
unsigned int num_acquired[FTL_RWB_TYPE_MAX];
|
||||
/* Number of acquired but not yet submitted entries */
|
||||
unsigned int num_pending;
|
||||
/* User/internal limits */
|
||||
size_t limits[FTL_RWB_TYPE_MAX];
|
||||
|
||||
/* Active batch queue */
|
||||
STAILQ_HEAD(, ftl_rwb_batch) active_queue;
|
||||
/* Number of active batches */
|
||||
unsigned int num_active_batches;
|
||||
|
||||
/* Free batch queue */
|
||||
STAILQ_HEAD(, ftl_rwb_batch) free_queue;
|
||||
/* Number of active batches */
|
||||
unsigned int num_free_batches;
|
||||
|
||||
/* Submission batch queue */
|
||||
struct spdk_ring *submit_queue;
|
||||
/* High-priority batch queue */
|
||||
struct spdk_ring *prio_queue;
|
||||
|
||||
/* Batch buffer */
|
||||
struct ftl_rwb_batch *batches;
|
||||
|
||||
/* RWB lock */
|
||||
pthread_spinlock_t lock;
|
||||
};
|
||||
|
||||
static int
|
||||
ftl_rwb_batch_full(const struct ftl_rwb_batch *batch, size_t batch_size)
|
||||
{
|
||||
struct ftl_rwb *rwb = batch->rwb;
|
||||
assert(batch_size <= rwb->xfer_size);
|
||||
return batch_size == rwb->xfer_size;
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_rwb_batch_init_entry(struct ftl_rwb_batch *batch, size_t pos)
|
||||
{
|
||||
struct ftl_rwb *rwb = batch->rwb;
|
||||
struct ftl_rwb_entry *entry, *prev;
|
||||
size_t batch_offset = pos % rwb->xfer_size;
|
||||
|
||||
entry = &batch->entries[batch_offset];
|
||||
entry->pos = pos;
|
||||
entry->data = ((char *)batch->buffer) + FTL_BLOCK_SIZE * batch_offset;
|
||||
entry->md = rwb->md_size ? ((char *)batch->md_buffer) + rwb->md_size * batch_offset : NULL;
|
||||
entry->batch = batch;
|
||||
entry->rwb = batch->rwb;
|
||||
|
||||
if (pthread_spin_init(&entry->lock, PTHREAD_PROCESS_PRIVATE)) {
|
||||
SPDK_ERRLOG("Spinlock initialization failure\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (batch_offset > 0) {
|
||||
prev = &batch->entries[batch_offset - 1];
|
||||
LIST_INSERT_AFTER(prev, entry, list_entry);
|
||||
} else {
|
||||
LIST_INSERT_HEAD(&batch->entry_list, entry, list_entry);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_rwb_batch_init(struct ftl_rwb *rwb, struct ftl_rwb_batch *batch, unsigned int pos)
|
||||
{
|
||||
size_t md_size, i;
|
||||
|
||||
md_size = rwb->md_size * rwb->xfer_size;
|
||||
batch->rwb = rwb;
|
||||
batch->pos = pos;
|
||||
|
||||
batch->entries = calloc(rwb->xfer_size, sizeof(*batch->entries));
|
||||
if (!batch->entries) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
LIST_INIT(&batch->entry_list);
|
||||
|
||||
batch->buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * rwb->xfer_size, 0, NULL);
|
||||
if (!batch->buffer) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (md_size > 0) {
|
||||
batch->md_buffer = spdk_dma_zmalloc(md_size, 0, NULL);
|
||||
if (!batch->md_buffer) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < rwb->xfer_size; ++i) {
|
||||
if (ftl_rwb_batch_init_entry(batch, pos * rwb->xfer_size + i)) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ftl_rwb *
|
||||
ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size, size_t num_punits)
|
||||
{
|
||||
struct ftl_rwb *rwb = NULL;
|
||||
struct ftl_rwb_batch *batch;
|
||||
size_t i;
|
||||
|
||||
rwb = calloc(1, sizeof(*rwb));
|
||||
if (!rwb) {
|
||||
SPDK_ERRLOG("Memory allocation failure\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pthread_spin_init(&rwb->lock, PTHREAD_PROCESS_PRIVATE)) {
|
||||
SPDK_ERRLOG("Spinlock initialization failure\n");
|
||||
free(rwb);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
assert(conf->rwb_size % xfer_size == 0);
|
||||
rwb->xfer_size = xfer_size;
|
||||
rwb->interleave_offset = xfer_size / conf->num_interleave_units;
|
||||
rwb->max_active_batches = conf->num_interleave_units == 1 ? 1 : num_punits;
|
||||
rwb->md_size = md_size;
|
||||
rwb->num_batches = conf->rwb_size / (FTL_BLOCK_SIZE * xfer_size) + rwb->max_active_batches;
|
||||
|
||||
rwb->batches = calloc(rwb->num_batches, sizeof(*rwb->batches));
|
||||
if (!rwb->batches) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
rwb->submit_queue = spdk_ring_create(SPDK_RING_TYPE_MP_SC,
|
||||
spdk_align32pow2(rwb->num_batches + 1),
|
||||
SPDK_ENV_SOCKET_ID_ANY);
|
||||
if (!rwb->submit_queue) {
|
||||
SPDK_ERRLOG("Failed to create submission queue\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
rwb->prio_queue = spdk_ring_create(SPDK_RING_TYPE_MP_SC,
|
||||
spdk_align32pow2(rwb->num_batches + 1),
|
||||
SPDK_ENV_SOCKET_ID_ANY);
|
||||
if (!rwb->prio_queue) {
|
||||
SPDK_ERRLOG("Failed to create high-prio submission queue\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
STAILQ_INIT(&rwb->free_queue);
|
||||
STAILQ_INIT(&rwb->active_queue);
|
||||
|
||||
for (i = 0; i < rwb->num_batches; ++i) {
|
||||
batch = &rwb->batches[i];
|
||||
|
||||
if (ftl_rwb_batch_init(rwb, batch, i)) {
|
||||
SPDK_ERRLOG("Failed to initialize RWB entry buffer\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
|
||||
rwb->num_free_batches++;
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
|
||||
rwb->limits[i] = ftl_rwb_entry_cnt(rwb);
|
||||
}
|
||||
|
||||
return rwb;
|
||||
error:
|
||||
ftl_rwb_free(rwb);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_free(struct ftl_rwb *rwb)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_rwb_batch *batch;
|
||||
|
||||
if (!rwb) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (rwb->batches) {
|
||||
for (size_t i = 0; i < rwb->num_batches; ++i) {
|
||||
batch = &rwb->batches[i];
|
||||
|
||||
if (batch->entries) {
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
pthread_spin_destroy(&entry->lock);
|
||||
}
|
||||
|
||||
free(batch->entries);
|
||||
}
|
||||
|
||||
spdk_dma_free(batch->buffer);
|
||||
spdk_dma_free(batch->md_buffer);
|
||||
}
|
||||
}
|
||||
|
||||
pthread_spin_destroy(&rwb->lock);
|
||||
spdk_ring_free(rwb->submit_queue);
|
||||
spdk_ring_free(rwb->prio_queue);
|
||||
free(rwb->batches);
|
||||
free(rwb);
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_batch_release(struct ftl_rwb_batch *batch)
|
||||
{
|
||||
struct ftl_rwb *rwb = batch->rwb;
|
||||
struct ftl_rwb_entry *entry;
|
||||
unsigned int num_acquired __attribute__((unused));
|
||||
|
||||
batch->num_ready = 0;
|
||||
batch->num_acquired = 0;
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
num_acquired = __atomic_fetch_sub(&rwb->num_acquired[ftl_rwb_entry_type(entry)], 1,
|
||||
__ATOMIC_SEQ_CST);
|
||||
entry->band = NULL;
|
||||
assert(num_acquired > 0);
|
||||
}
|
||||
|
||||
pthread_spin_lock(&rwb->lock);
|
||||
STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
|
||||
rwb->num_free_batches++;
|
||||
pthread_spin_unlock(&rwb->lock);
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_entry_cnt(const struct ftl_rwb *rwb)
|
||||
{
|
||||
return rwb->num_batches * rwb->xfer_size;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_num_batches(const struct ftl_rwb *rwb)
|
||||
{
|
||||
return rwb->num_batches;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_size(const struct ftl_rwb *rwb)
|
||||
{
|
||||
return rwb->num_batches * rwb->xfer_size;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_batch_get_offset(const struct ftl_rwb_batch *batch)
|
||||
{
|
||||
return batch->pos;
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_set_limits(struct ftl_rwb *rwb,
|
||||
const size_t limit[FTL_RWB_TYPE_MAX])
|
||||
{
|
||||
assert(limit[FTL_RWB_TYPE_USER] <= ftl_rwb_entry_cnt(rwb));
|
||||
assert(limit[FTL_RWB_TYPE_INTERNAL] <= ftl_rwb_entry_cnt(rwb));
|
||||
memcpy(rwb->limits, limit, sizeof(rwb->limits));
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_get_limits(struct ftl_rwb *rwb,
|
||||
size_t limit[FTL_RWB_TYPE_MAX])
|
||||
{
|
||||
memcpy(limit, rwb->limits, sizeof(rwb->limits));
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_num_acquired(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
|
||||
{
|
||||
return __atomic_load_n(&rwb->num_acquired[type], __ATOMIC_SEQ_CST);
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_get_active_batches(const struct ftl_rwb *rwb)
|
||||
{
|
||||
return rwb->num_active_batches;
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_batch_revert(struct ftl_rwb_batch *batch)
|
||||
{
|
||||
struct ftl_rwb *rwb = batch->rwb;
|
||||
|
||||
if (spdk_ring_enqueue(rwb->prio_queue, (void **)&batch, 1, NULL) != 1) {
|
||||
assert(0 && "Should never happen");
|
||||
}
|
||||
|
||||
__atomic_fetch_add(&rwb->num_pending, rwb->xfer_size, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
|
||||
unsigned int
|
||||
ftl_rwb_num_pending(struct ftl_rwb *rwb)
|
||||
{
|
||||
return __atomic_load_n(&rwb->num_pending, __ATOMIC_SEQ_CST);
|
||||
}
|
||||
|
||||
void
|
||||
ftl_rwb_push(struct ftl_rwb_entry *entry)
|
||||
{
|
||||
struct ftl_rwb_batch *batch = entry->batch;
|
||||
struct ftl_rwb *rwb = batch->rwb;
|
||||
size_t batch_size;
|
||||
|
||||
batch_size = __atomic_fetch_add(&batch->num_ready, 1, __ATOMIC_SEQ_CST) + 1;
|
||||
|
||||
/* Once all of the entries are put back, push the batch on the */
|
||||
/* submission queue */
|
||||
if (ftl_rwb_batch_full(batch, batch_size)) {
|
||||
if (spdk_ring_enqueue(rwb->submit_queue, (void **)&batch, 1, NULL) != 1) {
|
||||
assert(0 && "Should never happen");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_rwb_check_limits(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
|
||||
{
|
||||
return ftl_rwb_num_acquired(rwb, type) >= rwb->limits[type];
|
||||
}
|
||||
|
||||
static struct ftl_rwb_batch *
|
||||
_ftl_rwb_acquire_batch(struct ftl_rwb *rwb)
|
||||
{
|
||||
struct ftl_rwb_batch *batch;
|
||||
size_t i;
|
||||
|
||||
if (rwb->num_free_batches < rwb->max_active_batches) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < rwb->max_active_batches; i++) {
|
||||
batch = STAILQ_FIRST(&rwb->free_queue);
|
||||
STAILQ_REMOVE(&rwb->free_queue, batch, ftl_rwb_batch, stailq);
|
||||
rwb->num_free_batches--;
|
||||
|
||||
STAILQ_INSERT_TAIL(&rwb->active_queue, batch, stailq);
|
||||
        rwb->num_active_batches++;
    }

    return STAILQ_FIRST(&rwb->active_queue);
}

struct ftl_rwb_entry *
ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
{
    struct ftl_rwb_entry *entry = NULL;
    struct ftl_rwb_batch *current;

    if (ftl_rwb_check_limits(rwb, type)) {
        return NULL;
    }

    pthread_spin_lock(&rwb->lock);

    current = STAILQ_FIRST(&rwb->active_queue);
    if (!current) {
        current = _ftl_rwb_acquire_batch(rwb);
        if (!current) {
            goto error;
        }
    }

    entry = &current->entries[current->num_acquired++];

    if (current->num_acquired >= rwb->xfer_size) {
        /* If the whole batch is filled, */
        /* remove the current batch from active_queue */
        /* since it will need to move to submit_queue */
        STAILQ_REMOVE(&rwb->active_queue, current, ftl_rwb_batch, stailq);
        rwb->num_active_batches--;
    } else if (current->num_acquired % rwb->interleave_offset == 0) {
        /* If the current batch is filled by the interleaving offset, */
        /* move the current batch at the tail of active_queue */
        /* to place the next logical blocks into another batch. */
        STAILQ_REMOVE(&rwb->active_queue, current, ftl_rwb_batch, stailq);
        STAILQ_INSERT_TAIL(&rwb->active_queue, current, stailq);
    }

    pthread_spin_unlock(&rwb->lock);
    __atomic_fetch_add(&rwb->num_acquired[type], 1, __ATOMIC_SEQ_CST);
    __atomic_fetch_add(&rwb->num_pending, 1, __ATOMIC_SEQ_CST);
    return entry;
error:
    pthread_spin_unlock(&rwb->lock);
    return NULL;
}

void
ftl_rwb_disable_interleaving(struct ftl_rwb *rwb)
{
    struct ftl_rwb_batch *batch, *temp;

    pthread_spin_lock(&rwb->lock);
    rwb->max_active_batches = 1;
    rwb->interleave_offset = rwb->xfer_size;

    STAILQ_FOREACH_SAFE(batch, &rwb->active_queue, stailq, temp) {
        if (batch->num_acquired == 0) {
            STAILQ_REMOVE(&rwb->active_queue, batch, ftl_rwb_batch, stailq);
            rwb->num_active_batches--;

            assert(batch->num_ready == 0);
            assert(batch->num_acquired == 0);

            STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
            rwb->num_free_batches++;
        }
    }
    pthread_spin_unlock(&rwb->lock);
}

struct ftl_rwb_batch *
ftl_rwb_pop(struct ftl_rwb *rwb)
{
    struct ftl_rwb_batch *batch = NULL;
    unsigned int num_pending __attribute__((unused));

    if (spdk_ring_dequeue(rwb->prio_queue, (void **)&batch, 1) == 1) {
        num_pending = __atomic_fetch_sub(&rwb->num_pending, rwb->xfer_size,
                                         __ATOMIC_SEQ_CST);
        assert(num_pending > 0);
        return batch;
    }

    if (spdk_ring_dequeue(rwb->submit_queue, (void **)&batch, 1) == 1) {
        num_pending = __atomic_fetch_sub(&rwb->num_pending, rwb->xfer_size,
                                         __ATOMIC_SEQ_CST);
        assert(num_pending > 0);
        return batch;
    }

    return NULL;
}

static struct ftl_rwb_batch *
_ftl_rwb_next_batch(struct ftl_rwb *rwb, size_t pos)
{
    if (pos >= rwb->num_batches) {
        return NULL;
    }

    return &rwb->batches[pos];
}

struct ftl_rwb_batch *
ftl_rwb_next_batch(struct ftl_rwb_batch *batch)
{
    return _ftl_rwb_next_batch(batch->rwb, batch->pos + 1);
}

struct ftl_rwb_batch *
ftl_rwb_first_batch(struct ftl_rwb *rwb)
{
    return _ftl_rwb_next_batch(rwb, 0);
}

int
ftl_rwb_batch_empty(struct ftl_rwb_batch *batch)
{
    return __atomic_load_n(&batch->num_ready, __ATOMIC_SEQ_CST) == 0;
}

void *
ftl_rwb_batch_get_data(struct ftl_rwb_batch *batch)
{
    return batch->buffer;
}

void *
ftl_rwb_batch_get_md(struct ftl_rwb_batch *batch)
{
    return batch->md_buffer;
}

struct ftl_rwb_entry *
ftl_rwb_entry_from_offset(struct ftl_rwb *rwb, size_t offset)
{
    unsigned int b_off, e_off;

    b_off = offset / rwb->xfer_size;
    e_off = offset % rwb->xfer_size;

    assert(b_off < rwb->num_batches);

    return &rwb->batches[b_off].entries[e_off];
}

struct ftl_rwb_entry *
ftl_rwb_batch_first_entry(struct ftl_rwb_batch *batch)
{
    return LIST_FIRST(&batch->entry_list);
}
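For reference, the placement policy implemented by ftl_rwb_acquire() above can be reproduced with a small standalone sketch: entries are handed out from the batch at the head of active_queue, and every interleave_offset entries the head batch is rotated to the tail, striping consecutive blocks across max_active_batches batches. The constants and the program below are illustrative only, not part of the SPDK tree.

/* placement_sketch.c - standalone illustration of the batch rotation above */
#include <stdio.h>

#define XFER_SIZE           16  /* entries per batch (illustrative) */
#define MAX_ACTIVE_BATCHES  4   /* batches filled in parallel (illustrative) */
#define INTERLEAVE_OFFSET   (XFER_SIZE / MAX_ACTIVE_BATCHES)

int
main(void)
{
    unsigned int entry;

    for (entry = 0; entry < XFER_SIZE * MAX_ACTIVE_BATCHES; entry++) {
        /* Every INTERLEAVE_OFFSET entries the head batch moves to the tail,
         * so the stripe index selects the batch in round-robin order. */
        unsigned int stripe = entry / INTERLEAVE_OFFSET;
        unsigned int batch = stripe % MAX_ACTIVE_BATCHES;
        unsigned int slot = (stripe / MAX_ACTIVE_BATCHES) * INTERLEAVE_OFFSET +
                            entry % INTERLEAVE_OFFSET;

        printf("entry %2u -> batch %u, slot %2u\n", entry, batch, slot);
    }

    return 0;
}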
@ -1,171 +0,0 @@
/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_RWB_H
#define FTL_RWB_H

#include "spdk/stdinc.h"
#include "spdk/queue.h"

#include "ftl_io.h"
#include "ftl_addr.h"
#include "ftl_trace.h"

struct ftl_rwb;
struct ftl_rwb_batch;
struct ftl_band;
struct spdk_ftl_conf;

enum ftl_rwb_entry_type {
    FTL_RWB_TYPE_INTERNAL,
    FTL_RWB_TYPE_USER,
    FTL_RWB_TYPE_MAX
};

/* Write buffer entry */
struct ftl_rwb_entry {
    /* Owner rwb */
    struct ftl_rwb *rwb;

    /* Batch containing the entry */
    struct ftl_rwb_batch *batch;

    /* Logical address */
    uint64_t lba;

    /* Physical address */
    struct ftl_addr addr;

    /* Band the data is moved from (only valid when relocating data) */
    struct ftl_band *band;

    /* Position within the rwb's buffer */
    unsigned int pos;

    /* Data pointer */
    void *data;

    /* Metadata pointer */
    void *md;

    /* Data/state lock */
    pthread_spinlock_t lock;

    /* Flags */
    unsigned int flags;

    /* Indicates whether the entry is part of cache and is assigned a physical address */
    bool valid;

    /* Trace group id */
    uint64_t trace;

    /* Batch list entry */
    LIST_ENTRY(ftl_rwb_entry) list_entry;
};

struct ftl_rwb *ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size,
                             size_t md_size, size_t num_punits);
size_t ftl_rwb_get_active_batches(const struct ftl_rwb *rwb);
void ftl_rwb_free(struct ftl_rwb *rwb);
void ftl_rwb_batch_release(struct ftl_rwb_batch *batch);
void ftl_rwb_push(struct ftl_rwb_entry *entry);
size_t ftl_rwb_entry_cnt(const struct ftl_rwb *rwb);
void ftl_rwb_set_limits(struct ftl_rwb *rwb, const size_t limit[FTL_RWB_TYPE_MAX]);
void ftl_rwb_get_limits(struct ftl_rwb *rwb, size_t limit[FTL_RWB_TYPE_MAX]);
size_t ftl_rwb_num_acquired(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type);
size_t ftl_rwb_num_batches(const struct ftl_rwb *rwb);
size_t ftl_rwb_size(const struct ftl_rwb *rwb);
struct ftl_rwb_entry *ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type);
struct ftl_rwb_batch *ftl_rwb_pop(struct ftl_rwb *rwb);
struct ftl_rwb_batch *ftl_rwb_first_batch(struct ftl_rwb *rwb);
struct ftl_rwb_batch *ftl_rwb_next_batch(struct ftl_rwb_batch *batch);
int ftl_rwb_batch_empty(struct ftl_rwb_batch *batch);
struct ftl_rwb_entry *ftl_rwb_entry_from_offset(struct ftl_rwb *rwb, size_t offset);
size_t ftl_rwb_batch_get_offset(const struct ftl_rwb_batch *batch);
void ftl_rwb_batch_revert(struct ftl_rwb_batch *batch);
struct ftl_rwb_entry *ftl_rwb_batch_first_entry(struct ftl_rwb_batch *batch);
void *ftl_rwb_batch_get_data(struct ftl_rwb_batch *batch);
void *ftl_rwb_batch_get_md(struct ftl_rwb_batch *batch);
void ftl_rwb_disable_interleaving(struct ftl_rwb *rwb);
unsigned int ftl_rwb_num_pending(struct ftl_rwb *rwb);

static inline void
_ftl_rwb_entry_set_valid(struct ftl_rwb_entry *entry, bool valid)
{
    __atomic_store_n(&entry->valid, valid, __ATOMIC_SEQ_CST);
}

static inline void
ftl_rwb_entry_set_valid(struct ftl_rwb_entry *entry)
{
    _ftl_rwb_entry_set_valid(entry, true);
}

static inline void
ftl_rwb_entry_invalidate(struct ftl_rwb_entry *entry)
{
    _ftl_rwb_entry_set_valid(entry, false);
}

static inline int
ftl_rwb_entry_valid(struct ftl_rwb_entry *entry)
{
    return __atomic_load_n(&entry->valid, __ATOMIC_SEQ_CST);
}

static inline enum ftl_rwb_entry_type
ftl_rwb_type_from_flags(int flags) {
    return (flags & FTL_IO_INTERNAL) ? FTL_RWB_TYPE_INTERNAL : FTL_RWB_TYPE_USER;
}

static inline enum ftl_rwb_entry_type
ftl_rwb_entry_type(const struct ftl_rwb_entry *entry) {
    return ftl_rwb_type_from_flags(entry->flags);
}

static inline int
ftl_rwb_entry_internal(const struct ftl_rwb_entry *entry)
{
    return ftl_rwb_entry_type(entry) == FTL_RWB_TYPE_INTERNAL;
}

#define ftl_rwb_foreach(entry, batch) \
    for (entry = ftl_rwb_batch_first_entry(batch); \
         entry; entry = LIST_NEXT(entry, list_entry))

#define ftl_rwb_foreach_batch(batch, rwb) \
    for (batch = ftl_rwb_first_batch(rwb); batch; \
         batch = ftl_rwb_next_batch(batch))

#endif /* FTL_RWB_H */
@ -37,7 +37,6 @@
#include "ftl_trace.h"
#include "ftl_io.h"
#include "ftl_band.h"
#include "ftl_rwb.h"

#define OWNER_FTL 0x20
#define TRACE_GROUP_FTL 0x6
@ -53,7 +52,7 @@ enum ftl_trace_source {
#define FTL_TRACE_BAND_DEFRAG(src) FTL_TPOINT_ID(0, src)
#define FTL_TRACE_BAND_WRITE(src) FTL_TPOINT_ID(1, src)
#define FTL_TRACE_LIMITS(src) FTL_TPOINT_ID(2, src)
#define FTL_TRACE_RWB_POP(src) FTL_TPOINT_ID(3, src)
#define FTL_TRACE_WBUF_POP(src) FTL_TPOINT_ID(3, src)

#define FTL_TRACE_READ_SCHEDULE(src) FTL_TPOINT_ID(4, src)
#define FTL_TRACE_READ_SUBMISSION(src) FTL_TPOINT_ID(5, src)
@ -66,7 +65,7 @@ enum ftl_trace_source {
#define FTL_TRACE_MD_READ_COMPLETION(src) FTL_TPOINT_ID(11, src)

#define FTL_TRACE_WRITE_SCHEDULE(src) FTL_TPOINT_ID(12, src)
#define FTL_TRACE_WRITE_RWB_FILL(src) FTL_TPOINT_ID(13, src)
#define FTL_TRACE_WRITE_WBUF_FILL(src) FTL_TPOINT_ID(13, src)
#define FTL_TRACE_WRITE_SUBMISSION(src) FTL_TPOINT_ID(14, src)
#define FTL_TRACE_WRITE_COMPLETION(src) FTL_TPOINT_ID(15, src)

@ -96,7 +95,7 @@ SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
    spdk_trace_register_description(descbuf, FTL_TRACE_LIMITS(i),
                                    OWNER_FTL, OBJECT_NONE, 0, 0, "limits: ");
    snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "rwb_pop");
    spdk_trace_register_description(descbuf, FTL_TRACE_RWB_POP(i),
    spdk_trace_register_description(descbuf, FTL_TRACE_WBUF_POP(i),
                                    OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");

    snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_sched");
@ -139,7 +138,7 @@ SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
    spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SCHEDULE(i),
                                    OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
    snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "rwb_fill");
    spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_RWB_FILL(i),
    spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_WBUF_FILL(i),
                                    OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
    snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_submit");
    spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SUBMISSION(i),
@ -228,25 +227,25 @@ ftl_trace_lba_io_init(struct spdk_ftl_dev *dev, const struct ftl_io *io)
}

void
ftl_trace_rwb_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io)
ftl_trace_wbuf_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io)
{
    assert(io->trace != FTL_TRACE_INVALID_ID);

    spdk_trace_record(FTL_TRACE_WRITE_RWB_FILL(ftl_trace_io_source(io)), io->trace,
    spdk_trace_record(FTL_TRACE_WRITE_WBUF_FILL(ftl_trace_io_source(io)), io->trace,
                      0, 0, ftl_io_current_lba(io));
}

void
ftl_trace_rwb_pop(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry)
ftl_trace_wbuf_pop(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry)
{
    uint16_t tpoint_id;

    assert(entry->trace != FTL_TRACE_INVALID_ID);

    if (ftl_rwb_entry_internal(entry)) {
        tpoint_id = FTL_TRACE_RWB_POP(FTL_TRACE_SOURCE_INTERNAL);
    if (entry->io_flags & FTL_IO_INTERNAL) {
        tpoint_id = FTL_TRACE_WBUF_POP(FTL_TRACE_SOURCE_INTERNAL);
    } else {
        tpoint_id = FTL_TRACE_RWB_POP(FTL_TRACE_SOURCE_USER);
        tpoint_id = FTL_TRACE_WBUF_POP(FTL_TRACE_SOURCE_USER);
    }

    spdk_trace_record(tpoint_id, entry->trace, 0, entry->addr.offset, entry->lba);
@ -341,12 +340,12 @@ ftl_trace_submission(struct spdk_ftl_dev *dev, const struct ftl_io *io, struct f
}

void
ftl_trace_limits(struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free)
ftl_trace_limits(struct spdk_ftl_dev *dev, int limit, size_t num_free)
{
    struct ftl_trace *trace = &dev->stats.trace;

    spdk_trace_record(FTL_TRACE_LIMITS(FTL_TRACE_SOURCE_INTERNAL), ftl_trace_next_id(trace),
                      num_free, limits[FTL_RWB_TYPE_INTERNAL], limits[FTL_RWB_TYPE_USER]);
                      num_free, limit, 0);
}

uint64_t

@ -52,21 +52,21 @@ struct ftl_trace {
struct spdk_ftl_dev;
struct ftl_trace;
struct ftl_io;
struct ftl_rwb_entry;
struct ftl_wbuf_entry;
struct ftl_band;

uint64_t ftl_trace_alloc_id(struct spdk_ftl_dev *dev);
void ftl_trace_defrag_band(struct spdk_ftl_dev *dev, const struct ftl_band *band);
void ftl_trace_write_band(struct spdk_ftl_dev *dev, const struct ftl_band *band);
void ftl_trace_lba_io_init(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_rwb_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_rwb_pop(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry);
void ftl_trace_wbuf_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_wbuf_pop(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry);
void ftl_trace_submission(struct spdk_ftl_dev *dev,
                          const struct ftl_io *io,
                          struct ftl_addr addr, size_t addr_cnt);
void ftl_trace_completion(struct spdk_ftl_dev *dev,
                          const struct ftl_io *io,
                          enum ftl_trace_completion type);
void ftl_trace_limits(struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free);
void ftl_trace_limits(struct spdk_ftl_dev *dev, int limit, size_t num_free);

#endif /* FTL_TRACE_H */

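The ftl_trace_limits() prototype changes from taking the per-type limits array to taking a single integer limit. Below is a self-contained sketch of the calling convention before and after the change; the stand-in functions are hypothetical, only the argument shapes mirror the prototypes above.

/* trace_limits_sketch.c - hypothetical stand-ins, not SPDK code */
#include <stdio.h>
#include <stddef.h>

enum { FTL_RWB_TYPE_INTERNAL, FTL_RWB_TYPE_USER, FTL_RWB_TYPE_MAX };

/* Old shape: the tracepoint received both per-type limits. */
static void
trace_limits_old(const size_t *limits, size_t num_free)
{
    printf("limits: internal=%zu user=%zu free=%zu\n",
           limits[FTL_RWB_TYPE_INTERNAL], limits[FTL_RWB_TYPE_USER], num_free);
}

/* New shape: a single integer describing the current limit. */
static void
trace_limits_new(int limit, size_t num_free)
{
    printf("limit=%d free=%zu\n", limit, num_free);
}

int
main(void)
{
    size_t limits[FTL_RWB_TYPE_MAX] = { 64, 512 };

    trace_limits_old(limits, 128);
    trace_limits_new(70, 128);
    return 0;
}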
@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk

DIRS-y = ftl_rwb.c ftl_ppa ftl_band.c ftl_reloc.c ftl_wptr ftl_md ftl_io.c
DIRS-y = ftl_ppa ftl_band.c ftl_reloc.c ftl_wptr ftl_md ftl_io.c

.PHONY: all clean $(DIRS-y)

@ -40,7 +40,6 @@
#include "ftl/ftl_init.c"
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"
#include "ftl/ftl_rwb.c"

DEFINE_STUB(ftl_trace_alloc_id, uint64_t, (struct spdk_ftl_dev *dev), 0);
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
@ -58,7 +57,7 @@ DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
                                     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                                         void *buf, uint64_t offset_blocks, uint64_t num_blocks,
                                         spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
@ -75,7 +74,7 @@ DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev),
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 4096);
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
DEFINE_STUB_V(ftl_trace_rwb_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
@ -660,7 +659,7 @@ test_acquire_entry(void)

    dev = setup_device(num_io_channels, 16);

    num_entries = dev->conf.rwb_size / FTL_BLOCK_SIZE;
    num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
    entries = calloc(num_entries * num_io_channels, sizeof(*entries));
    SPDK_CU_ASSERT_FATAL(entries != NULL);
    ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
@ -967,7 +966,7 @@ test_entry_address(void)
    ioch_array = calloc(num_io_channels, sizeof(*ioch_array));
    SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

    num_entries = dev->conf.rwb_size / FTL_BLOCK_SIZE;
    num_entries = dev->conf.write_buffer_size / FTL_BLOCK_SIZE;
    entry_array = calloc(num_entries, sizeof(*entry_array));
    SPDK_CU_ASSERT_FATAL(entry_array != NULL);

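The tests above size their entry arrays from the per-io_channel buffer: write_buffer_size / FTL_BLOCK_SIZE entries per channel. A trivial standalone sketch of that arithmetic follows; the sizes are assumed examples, not SPDK defaults.

/* wbuf_entries_sketch.c - illustrative only */
#include <stdio.h>

#define FTL_BLOCK_SIZE 4096     /* block size assumed by this sketch (4 KiB) */

int
main(void)
{
    size_t write_buffer_size = 1024 * 1024; /* assumed example buffer size */
    size_t num_io_channels = 2;             /* assumed example channel count */
    size_t per_channel = write_buffer_size / FTL_BLOCK_SIZE;

    printf("entries per io_channel: %zu, total: %zu\n",
           per_channel, per_channel * num_io_channels);
    return 0;
}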
test/unit/lib/ftl/ftl_rwb.c/.gitignore
@ -1 +0,0 @@
ftl_rwb_ut
@ -1,38 +0,0 @@
#
#  BSD LICENSE
#
#  Copyright (c) Intel Corporation.
#  All rights reserved.
#

SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)

TEST_FILE = ftl_rwb_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
@ -1,589 +0,0 @@
|
||||
/*-
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (c) Intel Corporation.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "spdk/stdinc.h"
|
||||
|
||||
#include "spdk_cunit.h"
|
||||
#include "common/lib/test_env.c"
|
||||
|
||||
#include "ftl/ftl_rwb.c"
|
||||
|
||||
struct ftl_rwb_ut {
|
||||
/* configurations */
|
||||
struct spdk_ftl_conf conf;
|
||||
size_t metadata_size;
|
||||
size_t num_punits;
|
||||
size_t xfer_size;
|
||||
|
||||
/* the fields below are calculated by the configurations */
|
||||
size_t max_batches;
|
||||
size_t max_active_batches;
|
||||
size_t max_entries;
|
||||
size_t max_allocable_entries;
|
||||
size_t interleave_offset;
|
||||
size_t num_entries_per_worker;
|
||||
};
|
||||
|
||||
static struct ftl_rwb *g_rwb;
|
||||
static struct ftl_rwb_ut g_ut;
|
||||
|
||||
static int _init_suite(void);
|
||||
|
||||
static int
|
||||
init_suite1(void)
|
||||
{
|
||||
g_ut.conf.rwb_size = 1024 * 1024;
|
||||
g_ut.conf.num_interleave_units = 1;
|
||||
g_ut.metadata_size = 64;
|
||||
g_ut.num_punits = 4;
|
||||
g_ut.xfer_size = 16;
|
||||
|
||||
return _init_suite();
|
||||
}
|
||||
|
||||
static int
|
||||
init_suite2(void)
|
||||
{
|
||||
g_ut.conf.rwb_size = 2 * 1024 * 1024;
|
||||
g_ut.conf.num_interleave_units = 4;
|
||||
g_ut.metadata_size = 64;
|
||||
g_ut.num_punits = 8;
|
||||
g_ut.xfer_size = 16;
|
||||
|
||||
return _init_suite();
|
||||
}
|
||||
|
||||
static int
|
||||
_init_suite(void)
|
||||
{
|
||||
struct spdk_ftl_conf *conf = &g_ut.conf;
|
||||
|
||||
if (conf->num_interleave_units == 0 ||
|
||||
g_ut.xfer_size % conf->num_interleave_units ||
|
||||
g_ut.num_punits == 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_ut.max_batches = conf->rwb_size / (FTL_BLOCK_SIZE * g_ut.xfer_size);
|
||||
if (conf->num_interleave_units > 1) {
|
||||
g_ut.max_batches += g_ut.num_punits;
|
||||
g_ut.max_active_batches = g_ut.num_punits;
|
||||
} else {
|
||||
g_ut.max_batches++;
|
||||
g_ut.max_active_batches = 1;
|
||||
}
|
||||
|
||||
g_ut.max_entries = g_ut.max_batches * g_ut.xfer_size;
|
||||
g_ut.max_allocable_entries = (g_ut.max_batches / g_ut.max_active_batches) *
|
||||
g_ut.max_active_batches * g_ut.xfer_size;
|
||||
|
||||
g_ut.interleave_offset = g_ut.xfer_size / conf->num_interleave_units;
|
||||
|
||||
/* if max_batches is less than max_active_batches * 2, */
|
||||
/* test_rwb_limits_applied will be failed. */
|
||||
if (g_ut.max_batches < g_ut.max_active_batches * 2) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_ut.num_entries_per_worker = 16 * g_ut.max_allocable_entries;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
setup_rwb(void)
|
||||
{
|
||||
g_rwb = ftl_rwb_init(&g_ut.conf, g_ut.xfer_size,
|
||||
g_ut.metadata_size, g_ut.num_punits);
|
||||
SPDK_CU_ASSERT_FATAL(g_rwb != NULL);
|
||||
}
|
||||
|
||||
static void
|
||||
cleanup_rwb(void)
|
||||
{
|
||||
ftl_rwb_free(g_rwb);
|
||||
g_rwb = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_acquire(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
size_t i;
|
||||
|
||||
setup_rwb();
|
||||
/* Verify that it's possible to acquire all of the entries */
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_pop(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_rwb_batch *batch;
|
||||
size_t entry_count, i, i_reset = 0, i_offset = 0;
|
||||
uint64_t expected_lba;
|
||||
|
||||
setup_rwb();
|
||||
|
||||
/* Acquire all entries */
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
entry->lba = i;
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Pop all batches and free them */
|
||||
for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
entry_count = 0;
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
if (i % g_ut.max_active_batches == 0) {
|
||||
i_offset = i * g_ut.xfer_size;
|
||||
}
|
||||
|
||||
if (entry_count % g_ut.interleave_offset == 0) {
|
||||
i_reset = i % g_ut.max_active_batches +
|
||||
(entry_count / g_ut.interleave_offset) *
|
||||
g_ut.max_active_batches;
|
||||
}
|
||||
|
||||
expected_lba = i_offset +
|
||||
i_reset * g_ut.interleave_offset +
|
||||
entry_count % g_ut.interleave_offset;
|
||||
|
||||
CU_ASSERT_EQUAL(entry->lba, expected_lba);
|
||||
entry_count++;
|
||||
}
|
||||
|
||||
CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
|
||||
ftl_rwb_batch_release(batch);
|
||||
}
|
||||
|
||||
/* Acquire all entries once more */
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Pop one batch and check we can acquire xfer_size entries */
|
||||
for (i = 0; i < g_ut.max_active_batches; i++) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
ftl_rwb_batch_release(batch);
|
||||
}
|
||||
|
||||
for (i = 0; i < g_ut.xfer_size * g_ut.max_active_batches; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
|
||||
/* Pop and Release all batches */
|
||||
for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
ftl_rwb_batch_release(batch);
|
||||
}
|
||||
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_disable_interleaving(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_rwb_batch *batch;
|
||||
size_t entry_count, i;
|
||||
|
||||
setup_rwb();
|
||||
|
||||
ftl_rwb_disable_interleaving(g_rwb);
|
||||
|
||||
/* Acquire all entries and assign sequential lbas */
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
entry->lba = i;
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Check for expected lbas */
|
||||
for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
entry_count = 0;
|
||||
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
CU_ASSERT_EQUAL(entry->lba, i * g_ut.xfer_size + entry_count);
|
||||
entry_count++;
|
||||
}
|
||||
|
||||
CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
|
||||
ftl_rwb_batch_release(batch);
|
||||
}
|
||||
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_batch_revert(void)
|
||||
{
|
||||
struct ftl_rwb_batch *batch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
size_t i;
|
||||
|
||||
setup_rwb();
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Pop one batch and revert it */
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
|
||||
ftl_rwb_batch_revert(batch);
|
||||
|
||||
/* Verify all of the batches */
|
||||
for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
CU_ASSERT_PTR_NOT_NULL_FATAL(batch);
|
||||
}
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_entry_from_offset(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_addr addr = { .cached = 1 };
|
||||
size_t i;
|
||||
|
||||
setup_rwb();
|
||||
for (i = 0; i < g_ut.max_allocable_entries; ++i) {
|
||||
addr.cache_offset = i;
|
||||
|
||||
entry = ftl_rwb_entry_from_offset(g_rwb, i);
|
||||
CU_ASSERT_EQUAL(addr.cache_offset, entry->pos);
|
||||
}
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void *
|
||||
test_rwb_worker(void *ctx)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
unsigned int *num_done = ctx;
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < g_ut.num_entries_per_worker; ++i) {
|
||||
while (1) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
if (entry) {
|
||||
entry->flags = 0;
|
||||
ftl_rwb_push(entry);
|
||||
break;
|
||||
} else {
|
||||
/* Allow other threads to run under valgrind */
|
||||
pthread_yield();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__atomic_fetch_add(num_done, 1, __ATOMIC_SEQ_CST);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_parallel(void)
|
||||
{
|
||||
struct ftl_rwb_batch *batch;
|
||||
struct ftl_rwb_entry *entry;
|
||||
#define NUM_PARALLEL_WORKERS 4
|
||||
pthread_t workers[NUM_PARALLEL_WORKERS];
|
||||
unsigned int num_done = 0;
|
||||
size_t i, num_entries = 0;
|
||||
bool all_done = false;
|
||||
int rc;
|
||||
|
||||
setup_rwb();
|
||||
for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
|
||||
rc = pthread_create(&workers[i], NULL, test_rwb_worker, (void *)&num_done);
|
||||
CU_ASSERT_TRUE(rc == 0);
|
||||
}
|
||||
|
||||
while (1) {
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
if (batch) {
|
||||
ftl_rwb_foreach(entry, batch) {
|
||||
num_entries++;
|
||||
}
|
||||
|
||||
ftl_rwb_batch_release(batch);
|
||||
} else {
|
||||
if (NUM_PARALLEL_WORKERS == __atomic_load_n(&num_done, __ATOMIC_SEQ_CST)) {
|
||||
if (!all_done) {
|
||||
/* Pop all left entries from rwb */
|
||||
all_done = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
|
||||
pthread_join(workers[i], NULL);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
/* Allow other threads to run under valgrind */
|
||||
pthread_yield();
|
||||
}
|
||||
}
|
||||
|
||||
CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * g_ut.num_entries_per_worker);
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_limits_base(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
size_t limits[FTL_RWB_TYPE_MAX];
|
||||
|
||||
setup_rwb();
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_INTERNAL] == ftl_rwb_entry_cnt(g_rwb));
|
||||
CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_USER] == ftl_rwb_entry_cnt(g_rwb));
|
||||
|
||||
/* Verify it's possible to acquire both type of entries */
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
|
||||
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_limits_set(void)
|
||||
{
|
||||
size_t limits[FTL_RWB_TYPE_MAX], check[FTL_RWB_TYPE_MAX];
|
||||
size_t i;
|
||||
|
||||
setup_rwb();
|
||||
|
||||
/* Check valid limits */
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
memcpy(check, limits, sizeof(limits));
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);
|
||||
|
||||
for (i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
limits[i] = 0;
|
||||
}
|
||||
|
||||
memcpy(check, limits, sizeof(limits));
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
static void
|
||||
test_rwb_limits_applied(void)
|
||||
{
|
||||
struct ftl_rwb_entry *entry;
|
||||
struct ftl_rwb_batch *batch;
|
||||
size_t limits[FTL_RWB_TYPE_MAX];
|
||||
const size_t test_limit = g_ut.xfer_size * g_ut.max_active_batches;
|
||||
size_t i;
|
||||
|
||||
setup_rwb();
|
||||
|
||||
/* Check that it's impossible to acquire any entries when the limits are */
|
||||
/* set to 0 */
|
||||
ftl_rwb_get_limits(g_rwb, limits);
|
||||
limits[FTL_RWB_TYPE_USER] = 0;
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
|
||||
limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
|
||||
limits[FTL_RWB_TYPE_INTERNAL] = 0;
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
|
||||
/* Check positive limits */
|
||||
limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
|
||||
limits[FTL_RWB_TYPE_INTERNAL] = test_limit;
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
for (i = 0; i < test_limit; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
entry->flags = FTL_IO_INTERNAL;
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Now we expect null, since we've reached threshold */
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
|
||||
for (i = 0; i < test_limit / g_ut.xfer_size; ++i) {
|
||||
/* Complete the entries and check we can retrieve the entries once again */
|
||||
batch = ftl_rwb_pop(g_rwb);
|
||||
SPDK_CU_ASSERT_FATAL(batch);
|
||||
ftl_rwb_batch_release(batch);
|
||||
}
|
||||
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
entry->flags = FTL_IO_INTERNAL;
|
||||
|
||||
/* Set the same limit but this time for user entries */
|
||||
limits[FTL_RWB_TYPE_USER] = test_limit;
|
||||
limits[FTL_RWB_TYPE_INTERNAL] = ftl_rwb_entry_cnt(g_rwb);
|
||||
ftl_rwb_set_limits(g_rwb, limits);
|
||||
for (i = 0; i < test_limit; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
ftl_rwb_push(entry);
|
||||
}
|
||||
|
||||
/* Now we expect null, since we've reached threshold */
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
|
||||
CU_ASSERT_PTR_NULL(entry);
|
||||
|
||||
/* Check that we're still able to acquire a number of internal entries */
|
||||
/* while the user entires are being throttled */
|
||||
for (i = 0; i < g_ut.xfer_size; ++i) {
|
||||
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
|
||||
SPDK_CU_ASSERT_FATAL(entry);
|
||||
}
|
||||
|
||||
cleanup_rwb();
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
CU_pSuite suite1, suite2;
|
||||
unsigned int num_failures;
|
||||
|
||||
if (CU_initialize_registry() != CUE_SUCCESS) {
|
||||
return CU_get_error();
|
||||
}
|
||||
|
||||
suite1 = CU_add_suite("suite1", init_suite1, NULL);
|
||||
if (!suite1) {
|
||||
CU_cleanup_registry();
|
||||
return CU_get_error();
|
||||
}
|
||||
|
||||
suite2 = CU_add_suite("suite2", init_suite2, NULL);
|
||||
if (!suite2) {
|
||||
CU_cleanup_registry();
|
||||
return CU_get_error();
|
||||
}
|
||||
|
||||
if (
|
||||
CU_add_test(suite1, "test_rwb_acquire",
|
||||
test_rwb_acquire) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_pop",
|
||||
test_rwb_pop) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_disable_interleaving",
|
||||
test_rwb_disable_interleaving) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_batch_revert",
|
||||
test_rwb_batch_revert) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_entry_from_offset",
|
||||
test_rwb_entry_from_offset) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_parallel",
|
||||
test_rwb_parallel) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_limits_base",
|
||||
test_rwb_limits_base) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_limits_set",
|
||||
test_rwb_limits_set) == NULL
|
||||
|| CU_add_test(suite1, "test_rwb_limits_applied",
|
||||
test_rwb_limits_applied) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_acquire",
|
||||
test_rwb_acquire) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_pop",
|
||||
test_rwb_pop) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_disable_interleaving",
|
||||
test_rwb_disable_interleaving) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_batch_revert",
|
||||
test_rwb_batch_revert) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_entry_from_offset",
|
||||
test_rwb_entry_from_offset) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_parallel",
|
||||
test_rwb_parallel) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_limits_base",
|
||||
test_rwb_limits_base) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_limits_set",
|
||||
test_rwb_limits_set) == NULL
|
||||
|| CU_add_test(suite2, "test_rwb_limits_applied",
|
||||
test_rwb_limits_applied) == NULL
|
||||
) {
|
||||
CU_cleanup_registry();
|
||||
return CU_get_error();
|
||||
}
|
||||
|
||||
CU_basic_set_mode(CU_BRM_VERBOSE);
|
||||
CU_basic_run_tests();
|
||||
num_failures = CU_get_number_of_failures();
|
||||
CU_cleanup_registry();
|
||||
|
||||
return num_failures;
|
||||
}
|
@ -62,11 +62,8 @@ DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, si
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
                                     struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_rwb_get_limits, (struct ftl_rwb *rwb, size_t limit[FTL_RWB_TYPE_MAX]));
DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free));
DEFINE_STUB(ftl_rwb_entry_cnt, size_t, (const struct ftl_rwb *rwb), 0);
DEFINE_STUB_V(ftl_rwb_set_limits, (struct ftl_rwb *rwb, const size_t limit[FTL_RWB_TYPE_MAX]));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
                                             struct spdk_io_channel *ch,

@ -41,7 +41,6 @@ function unittest_event {
}

function unittest_ftl {
    $valgrind $testdir/lib/ftl/ftl_rwb.c/ftl_rwb_ut
    $valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
    $valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
    $valgrind $testdir/lib/ftl/ftl_reloc.c/ftl_reloc_ut
