lib/ftl: consider 3D TLC NAND read unit size
For the latest 3D TLC NAND, one write buffer unit (rwb batch) needs to be spread across three PUs instead of being allocated to a single PU, because the optimal write size (ws_opt) of 3D TLC NAND is three times larger than the optimal read size (rs_opt). Spreading each batch preserves sequential read performance.

This patch adds num_interleave_units to struct spdk_ftl_conf to configure the number of interleaving units per ws_opt. If num_interleave_units is set to 1, all ws_opt blocks are placed sequentially on a single PU. If it is set to N, the blocks are staggered in chunks of ws_opt / N, so consecutively numbered blocks are separated by ws_opt / num_interleave_units.

With this patch, sequential read performance on our system improves from 1.9 GiB/s to 2.97 GiB/s. No performance degradation is observed on sequential writes or 4KB random reads/writes. Please refer to the Trello card for more details: https://trello.com/c/Osol93ZU

Change-Id: I371e72067b278ef43c3ac87a3d9ce9010d3fcb15
Signed-off-by: Claire J. In <claire.in@circuitblvd.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/450976
Reviewed-by: Young Tack Jin <youngtack.jin@circuitblvd.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
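Editor's note: to make the layout concrete, here is a standalone C sketch of the placement rule the message describes. It is illustrative only, not code from this patch; ws_opt = 24 blocks and three PUs are assumed values. With num_interleave_units = 3, each run of rs_opt-sized blocks lands on a different PU, so a sequential read of consecutive blocks is served by all three PUs in parallel.

#include <stdio.h>

int main(void) {
    const unsigned ws_opt = 24;               /* optimal write size, in blocks (assumed) */
    const unsigned num_interleave_units = 3;  /* 3D TLC: ws_opt == 3 * rs_opt */
    const unsigned offset = ws_opt / num_interleave_units;  /* 8 blocks per chunk */

    for (unsigned blk = 0; blk < ws_opt; ++blk) {
        unsigned chunk = blk / offset;
        unsigned pu = chunk % num_interleave_units;           /* PU the chunk is staggered to */
        unsigned slot = (chunk / num_interleave_units) * offset + blk % offset;
        printf("block %2u -> PU %u, slot %2u\n", blk, pu, slot);
    }
    return 0;
}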
--- a/include/spdk/ftl.h
+++ b/include/spdk/ftl.h
@@ -86,6 +86,9 @@ struct spdk_ftl_conf {
         /* User writes limits */
         struct spdk_ftl_limit limits[SPDK_FTL_LIMIT_MAX];
     } defrag;
 
+    /* Number of interleaving units per ws_opt */
+    size_t num_interleave_units;
+
 };
 
 /* Range of parallel units (inclusive) */
--- a/lib/ftl/ftl_core.c
+++ b/lib/ftl/ftl_core.c
@@ -609,17 +609,23 @@ ftl_process_shutdown(struct spdk_ftl_dev *dev)
 {
     size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
                   ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
+    size_t num_active = dev->xfer_size * ftl_rwb_get_active_batches(dev->rwb);
 
-    if (size >= dev->xfer_size) {
+    num_active = num_active ? num_active : dev->xfer_size;
+    if (size >= num_active) {
         return;
     }
 
     /* If we reach this point we need to remove free bands */
     /* and pad current wptr band to the end */
-    ftl_remove_free_bands(dev);
+    if (ftl_rwb_get_active_batches(dev->rwb) <= 1) {
+        ftl_remove_free_bands(dev);
+    }
 
     /* Pad write buffer until band is full */
-    ftl_rwb_pad(dev, dev->xfer_size - size);
+    /* TODO: It would be better to request padding to as many PUs as possible */
+    /* instead of requesting one PU at a time */
+    ftl_rwb_pad(dev, num_active - size);
 }
 
 static int
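Editor's note: a quick worked example of the new shutdown threshold; the numbers below are illustrative assumptions, not values from the patch. With interleaving active, the buffer must be padded so that every active batch can be closed and submitted, not just one.

#include <stdio.h>

int main(void) {
    size_t xfer_size = 16;      /* entries per batch (ws_opt), assumed */
    size_t active_batches = 4;  /* what ftl_rwb_get_active_batches() would return */
    size_t size = 40;           /* entries currently acquired in the rwb */

    size_t num_active = xfer_size * active_batches;    /* 64 */
    num_active = num_active ? num_active : xfer_size;  /* same guard as the patch */
    if (size < num_active) {
        printf("pad %zu entries\n", num_active - size); /* pad 24 entries */
    }
    return 0;
}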
@@ -1268,7 +1274,7 @@ static void
 ftl_flush_pad_batch(struct spdk_ftl_dev *dev)
 {
     struct ftl_rwb *rwb = dev->rwb;
-    size_t size;
+    size_t size, num_entries;
 
     size = ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_INTERNAL) +
            ftl_rwb_num_acquired(rwb, FTL_RWB_TYPE_USER);
@@ -1280,8 +1286,9 @@ ftl_flush_pad_batch(struct spdk_ftl_dev *dev)
     /* Only add padding when there's less than xfer size */
     /* entries in the buffer. Otherwise we just have to wait */
     /* for the entries to become ready. */
-    if (size < dev->xfer_size) {
-        ftl_rwb_pad(dev, dev->xfer_size - (size % dev->xfer_size));
+    num_entries = ftl_rwb_get_active_batches(dev->rwb) * dev->xfer_size;
+    if (size < num_entries) {
+        ftl_rwb_pad(dev, num_entries - (size % num_entries));
     }
 }
 
--- a/lib/ftl/ftl_init.c
+++ b/lib/ftl/ftl_init.c
@@ -87,6 +87,9 @@ static const struct spdk_ftl_conf g_default_conf = {
     .max_active_relocs = 3,
     /* IO pool size per user thread (this should be adjusted to thread IO qdepth) */
     .user_io_pool_size = 2048,
+    /* Number of interleaving units per ws_opt */
+    /* 1 for default and 3 for 3D TLC NAND */
+    .num_interleave_units = 1,
 };
 
 static void ftl_dev_free_sync(struct spdk_ftl_dev *dev);
@@ -116,7 +119,8 @@ ftl_band_init_md(struct ftl_band *band)
 }
 
 static int
-ftl_check_conf(const struct spdk_ftl_conf *conf)
+ftl_check_conf(const struct spdk_ftl_conf *conf,
+               const struct spdk_ocssd_geometry_data *geo)
 {
     size_t i;
 
@@ -135,6 +139,9 @@ ftl_check_conf(const struct spdk_ftl_conf *conf)
     if (conf->rwb_size % FTL_BLOCK_SIZE != 0) {
         return -1;
     }
+    if (geo->ws_opt % conf->num_interleave_units != 0) {
+        return -1;
+    }
 
     for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
         if (conf->defrag.limits[i].limit > 100) {
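Editor's note: the new constraint can be sanity-checked in isolation; a minimal sketch with assumed geometry values. The interleave offset (ws_opt / num_interleave_units) must evenly tile a batch, which is exactly what this check enforces.

#include <assert.h>

int main(void) {
    unsigned ws_opt = 24;               /* geo->ws_opt, assumed */
    unsigned num_interleave_units = 3;  /* conf->num_interleave_units, assumed */

    assert(ws_opt % num_interleave_units == 0);  /* rejected with -1 otherwise */
    return 0;
}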
@@ -157,7 +164,7 @@ ftl_check_init_opts(const struct spdk_ftl_dev_init_opts *opts,
         return -1;
     }
 
-    if (ftl_check_conf(opts->conf)) {
+    if (ftl_check_conf(opts->conf, geo)) {
         return -1;
     }
 
@@ -952,7 +959,7 @@ spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *_opts, spdk_ftl_init_fn c
         goto fail_sync;
     }
 
-    dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size);
+    dev->rwb = ftl_rwb_init(&dev->conf, dev->geo.ws_opt, dev->md_size, ftl_dev_num_punits(dev));
     if (!dev->rwb) {
         SPDK_ERRLOG("Unable to initialize rwb structures\n");
         goto fail_sync;
@@ -1095,6 +1102,8 @@ spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_fn cb, void *cb_arg)
     dev->halt_arg = cb_arg;
     dev->halt = 1;
 
+    ftl_rwb_disable_interleaving(dev->rwb);
+
     spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_add_halt_poller, dev);
     return 0;
 }
--- a/lib/ftl/ftl_rwb.c
+++ b/lib/ftl/ftl_rwb.c
@@ -70,6 +70,12 @@ struct ftl_rwb_batch {
 struct ftl_rwb {
     /* Number of batches */
     size_t num_batches;
 
+    /* Information for interleaving */
+    size_t interleave_offset;
+
+    /* Maximum number of active batches */
+    size_t max_active_batches;
+
     /* Number of entries per batch */
     size_t xfer_size;
     /* Metadata's size */
@@ -80,11 +86,16 @@ struct ftl_rwb {
     /* User/internal limits */
     size_t limits[FTL_RWB_TYPE_MAX];
 
-    /* Current batch */
-    struct ftl_rwb_batch *current;
+    /* Active batch queue */
+    STAILQ_HEAD(, ftl_rwb_batch) active_queue;
+    /* Number of active batches */
+    unsigned int num_active_batches;
 
     /* Free batch queue */
     STAILQ_HEAD(, ftl_rwb_batch) free_queue;
+    /* Number of free batches */
+    unsigned int num_free_batches;
 
     /* Submission batch queue */
     struct spdk_ring *submit_queue;
     /* High-priority batch queue */
@@ -174,9 +185,9 @@ ftl_rwb_batch_init(struct ftl_rwb *rwb, struct ftl_rwb_batch *batch, unsigned in
 }
 
 struct ftl_rwb *
-ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size)
+ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size, size_t num_punits)
 {
-    struct ftl_rwb *rwb;
+    struct ftl_rwb *rwb = NULL;
     struct ftl_rwb_batch *batch;
     size_t i;
 
@@ -194,8 +205,10 @@ ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size)
 
     assert(conf->rwb_size % xfer_size == 0);
     rwb->xfer_size = xfer_size;
+    rwb->interleave_offset = xfer_size / conf->num_interleave_units;
+    rwb->max_active_batches = conf->num_interleave_units == 1 ? 1 : num_punits;
     rwb->md_size = md_size;
-    rwb->num_batches = conf->rwb_size / (FTL_BLOCK_SIZE * xfer_size);
+    rwb->num_batches = conf->rwb_size / (FTL_BLOCK_SIZE * xfer_size) + rwb->max_active_batches;
 
     rwb->batches = calloc(rwb->num_batches, sizeof(*rwb->batches));
     if (!rwb->batches) {
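Editor's note: the extra "+ rwb->max_active_batches" term reserves one full group of batches so that a complete set of active batches can always be pulled from the free queue at once. Worked numbers below are assumptions chosen to match the second unit-test suite later in this patch.

#include <stdio.h>

int main(void) {
    size_t ftl_block_size = 4096;  /* FTL_BLOCK_SIZE, assumed 4 KiB */
    size_t rwb_size = 2 * 1024 * 1024;
    size_t xfer_size = 16;
    size_t num_punits = 8;

    /* num_interleave_units > 1, so max_active_batches == num_punits */
    size_t max_active_batches = num_punits;
    size_t num_batches = rwb_size / (ftl_block_size * xfer_size) + max_active_batches;

    printf("num_batches = %zu\n", num_batches);  /* 32 + 8 = 40 */
    return 0;
}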
@@ -219,6 +232,7 @@ ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size)
     }
 
     STAILQ_INIT(&rwb->free_queue);
+    STAILQ_INIT(&rwb->active_queue);
 
     for (i = 0; i < rwb->num_batches; ++i) {
         batch = &rwb->batches[i];
@@ -229,6 +243,7 @@ ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size)
         }
 
         STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
+        rwb->num_free_batches++;
     }
 
     for (unsigned int i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
@@ -293,6 +308,7 @@ ftl_rwb_batch_release(struct ftl_rwb_batch *batch)
 
     pthread_spin_lock(&rwb->lock);
     STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
+    rwb->num_free_batches++;
     pthread_spin_unlock(&rwb->lock);
 }
 
@@ -336,6 +352,12 @@ ftl_rwb_num_acquired(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
     return __atomic_load_n(&rwb->num_acquired[type], __ATOMIC_SEQ_CST);
 }
 
+size_t
+ftl_rwb_get_active_batches(const struct ftl_rwb *rwb)
+{
+    return rwb->num_active_batches;
+}
+
 void
 ftl_rwb_batch_revert(struct ftl_rwb_batch *batch)
 {
@@ -370,6 +392,28 @@ ftl_rwb_check_limits(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
     return ftl_rwb_num_acquired(rwb, type) >= rwb->limits[type];
 }
 
+static struct ftl_rwb_batch *
+_ftl_rwb_acquire_batch(struct ftl_rwb *rwb)
+{
+    struct ftl_rwb_batch *batch;
+    size_t i;
+
+    if (rwb->num_free_batches < rwb->max_active_batches) {
+        return NULL;
+    }
+
+    for (i = 0; i < rwb->max_active_batches; i++) {
+        batch = STAILQ_FIRST(&rwb->free_queue);
+        STAILQ_REMOVE(&rwb->free_queue, batch, ftl_rwb_batch, stailq);
+        rwb->num_free_batches--;
+
+        STAILQ_INSERT_TAIL(&rwb->active_queue, batch, stailq);
+        rwb->num_active_batches++;
+    }
+
+    return STAILQ_FIRST(&rwb->active_queue);
+}
+
 struct ftl_rwb_entry *
 ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
 {
@@ -382,22 +426,28 @@ ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
 
     pthread_spin_lock(&rwb->lock);
 
-    current = rwb->current;
+    current = STAILQ_FIRST(&rwb->active_queue);
     if (!current) {
-        current = STAILQ_FIRST(&rwb->free_queue);
+        current = _ftl_rwb_acquire_batch(rwb);
         if (!current) {
             goto error;
         }
-
-        STAILQ_REMOVE(&rwb->free_queue, current, ftl_rwb_batch, stailq);
-        rwb->current = current;
     }
 
     entry = &current->entries[current->num_acquired++];
 
-    /* If the whole batch is filled, clear the current batch pointer */
     if (current->num_acquired >= rwb->xfer_size) {
-        rwb->current = NULL;
+        /* If the whole batch is filled, remove it from the active queue, */
+        /* since it will need to move to the submit queue */
+        STAILQ_REMOVE(&rwb->active_queue, current, ftl_rwb_batch, stailq);
+        rwb->num_active_batches--;
+    } else if (current->num_acquired % rwb->interleave_offset == 0) {
+        /* If the current batch is filled up to the interleaving offset, */
+        /* move it to the tail of the active queue */
+        /* to place the next logical blocks into another batch. */
+        STAILQ_REMOVE(&rwb->active_queue, current, ftl_rwb_batch, stailq);
+        STAILQ_INSERT_TAIL(&rwb->active_queue, current, stailq);
    }
 
     pthread_spin_unlock(&rwb->lock);
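Editor's note: to see why this rotation interleaves the data, here is a small standalone model of the acquire path (an array-based stand-in for active_queue; all sizes are assumptions). After every interleave_offset entries the head batch moves to the tail, so the next run of consecutive LBAs lands in a different batch, i.e. a different PU.

#include <stdio.h>

#define NUM_ACTIVE 4   /* max_active_batches (== num_punits), assumed */
#define XFER_SIZE 8    /* entries per batch (ws_opt), assumed */
#define OFFSET 2       /* interleave_offset = XFER_SIZE / num_interleave_units */

int main(void) {
    int queue[NUM_ACTIVE] = {0, 1, 2, 3};  /* batch ids, head at index 0 */
    int acquired[NUM_ACTIVE] = {0};

    for (int lba = 0; lba < NUM_ACTIVE * XFER_SIZE; ++lba) {
        int cur = queue[0];
        printf("lba %2d -> batch %d, slot %d\n", lba, cur, acquired[cur]);
        if (++acquired[cur] % OFFSET == 0) {
            /* rotate the head batch to the tail (the real code instead
             * removes a completely full batch for submission) */
            for (int i = 0; i + 1 < NUM_ACTIVE; ++i) {
                queue[i] = queue[i + 1];
            }
            queue[NUM_ACTIVE - 1] = cur;
        }
    }
    return 0;
}

Running it shows batch 0 holding LBAs 0, 1, 8, 9, 16, 17, 24, 25 and so on, which is exactly the pattern the unit test's expected_lba computation verifies below.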
@@ -408,6 +458,30 @@ error:
     return NULL;
 }
 
+void
+ftl_rwb_disable_interleaving(struct ftl_rwb *rwb)
+{
+    struct ftl_rwb_batch *batch, *temp;
+
+    pthread_spin_lock(&rwb->lock);
+    rwb->max_active_batches = 1;
+    rwb->interleave_offset = rwb->xfer_size;
+
+    STAILQ_FOREACH_SAFE(batch, &rwb->active_queue, stailq, temp) {
+        if (batch->num_acquired == 0) {
+            STAILQ_REMOVE(&rwb->active_queue, batch, ftl_rwb_batch, stailq);
+            rwb->num_active_batches--;
+
+            assert(batch->num_ready == 0);
+            assert(batch->num_acquired == 0);
+
+            STAILQ_INSERT_TAIL(&rwb->free_queue, batch, stailq);
+            rwb->num_free_batches++;
+        }
+    }
+    pthread_spin_unlock(&rwb->lock);
+}
+
 struct ftl_rwb_batch *
 ftl_rwb_pop(struct ftl_rwb *rwb)
 {
--- a/lib/ftl/ftl_rwb.h
+++ b/lib/ftl/ftl_rwb.h
@@ -90,7 +90,9 @@ struct ftl_rwb_entry {
     LIST_ENTRY(ftl_rwb_entry) list_entry;
 };
 
-struct ftl_rwb *ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size, size_t md_size);
+struct ftl_rwb *ftl_rwb_init(const struct spdk_ftl_conf *conf, size_t xfer_size,
+                             size_t md_size, size_t num_punits);
+size_t ftl_rwb_get_active_batches(const struct ftl_rwb *rwb);
 void ftl_rwb_free(struct ftl_rwb *rwb);
 void ftl_rwb_batch_release(struct ftl_rwb_batch *batch);
 void ftl_rwb_push(struct ftl_rwb_entry *entry);
@@ -110,6 +112,7 @@ void ftl_rwb_batch_revert(struct ftl_rwb_batch *batch);
 struct ftl_rwb_entry *ftl_rwb_batch_first_entry(struct ftl_rwb_batch *batch);
 void *ftl_rwb_batch_get_data(struct ftl_rwb_batch *batch);
 void *ftl_rwb_batch_get_md(struct ftl_rwb_batch *batch);
+void ftl_rwb_disable_interleaving(struct ftl_rwb *rwb);
 
 static inline void
 _ftl_rwb_entry_set_valid(struct ftl_rwb_entry *entry, bool valid)
--- a/test/unit/lib/ftl/ftl_rwb.c/ftl_rwb_ut.c
+++ b/test/unit/lib/ftl/ftl_rwb.c/ftl_rwb_ut.c
@@ -38,19 +38,93 @@
 
 #include "ftl/ftl_rwb.c"
 
-#define RWB_SIZE (1024 * 1024)
-#define RWB_ENTRY_COUNT (RWB_SIZE / FTL_BLOCK_SIZE)
-#define XFER_SIZE 16
-#define METADATA_SIZE 64
+struct ftl_rwb_ut {
+    /* configurations */
+    struct spdk_ftl_conf conf;
+    size_t metadata_size;
+    size_t num_punits;
+    size_t xfer_size;
+
+    /* the fields below are calculated from the configurations */
+    size_t max_batches;
+    size_t max_active_batches;
+    size_t max_entries;
+    size_t max_allocable_entries;
+    size_t interleave_offset;
+    size_t num_entries_per_worker;
+};
 
 static struct ftl_rwb *g_rwb;
+static struct ftl_rwb_ut g_ut;
+
+static int _init_suite(void);
+
+static int
+init_suite1(void)
+{
+    g_ut.conf.rwb_size = 1024 * 1024;
+    g_ut.conf.num_interleave_units = 1;
+    g_ut.metadata_size = 64;
+    g_ut.num_punits = 4;
+    g_ut.xfer_size = 16;
+
+    return _init_suite();
+}
+
+static int
+init_suite2(void)
+{
+    g_ut.conf.rwb_size = 2 * 1024 * 1024;
+    g_ut.conf.num_interleave_units = 4;
+    g_ut.metadata_size = 64;
+    g_ut.num_punits = 8;
+    g_ut.xfer_size = 16;
+
+    return _init_suite();
+}
+
+static int
+_init_suite(void)
+{
+    struct spdk_ftl_conf *conf = &g_ut.conf;
+
+    if (conf->num_interleave_units == 0 ||
+        g_ut.xfer_size % conf->num_interleave_units ||
+        g_ut.num_punits == 0) {
+        return -1;
+    }
+
+    g_ut.max_batches = conf->rwb_size / (FTL_BLOCK_SIZE * g_ut.xfer_size);
+    if (conf->num_interleave_units > 1) {
+        g_ut.max_batches += g_ut.num_punits;
+        g_ut.max_active_batches = g_ut.num_punits;
+    } else {
+        g_ut.max_batches++;
+        g_ut.max_active_batches = 1;
+    }
+
+    g_ut.max_entries = g_ut.max_batches * g_ut.xfer_size;
+    g_ut.max_allocable_entries = (g_ut.max_batches / g_ut.max_active_batches) *
+                                 g_ut.max_active_batches * g_ut.xfer_size;
+
+    g_ut.interleave_offset = g_ut.xfer_size / conf->num_interleave_units;
+
+    /* if max_batches is less than max_active_batches * 2, */
+    /* test_rwb_limits_applied will fail */
+    if (g_ut.max_batches < g_ut.max_active_batches * 2) {
+        return -1;
+    }
+
+    g_ut.num_entries_per_worker = 16 * g_ut.max_allocable_entries;
+
+    return 0;
+}
 
 static void
 setup_rwb(void)
 {
-    struct spdk_ftl_conf conf = { .rwb_size = RWB_SIZE };
-
-    g_rwb = ftl_rwb_init(&conf, XFER_SIZE, METADATA_SIZE);
+    g_rwb = ftl_rwb_init(&g_ut.conf, g_ut.xfer_size,
+                         g_ut.metadata_size, g_ut.num_punits);
     SPDK_CU_ASSERT_FATAL(g_rwb != NULL);
 }
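Editor's note: for the interleaved suite (init_suite2), the derived fields work out as follows; a tiny standalone check of the same arithmetic, using the suite's own configuration values.

#include <stdio.h>

int main(void) {
    size_t rwb_size = 2 * 1024 * 1024, block = 4096, xfer_size = 16;
    size_t num_interleave_units = 4, num_punits = 8;

    size_t max_batches = rwb_size / (block * xfer_size) + num_punits;  /* 32 + 8 = 40 */
    size_t max_active_batches = num_punits;                            /* 8 */
    size_t max_allocable_entries = (max_batches / max_active_batches) *
                                   max_active_batches * xfer_size;     /* 5 * 8 * 16 = 640 */
    size_t interleave_offset = xfer_size / num_interleave_units;       /* 4 */

    printf("max_batches=%zu max_active=%zu max_allocable=%zu offset=%zu\n",
           max_batches, max_active_batches, max_allocable_entries, interleave_offset);
    return 0;
}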
@@ -64,12 +138,12 @@ cleanup_rwb(void)
 static void
 test_rwb_acquire(void)
 {
-    size_t i;
     struct ftl_rwb_entry *entry;
+    size_t i;
 
     setup_rwb();
     /* Verify that it's possible to acquire all of the entries */
-    for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
         SPDK_CU_ASSERT_FATAL(entry);
         ftl_rwb_push(entry);
@@ -85,52 +159,118 @@ test_rwb_pop(void)
 {
     struct ftl_rwb_entry *entry;
     struct ftl_rwb_batch *batch;
-    size_t entry_count, i;
+    size_t entry_count, i, i_reset = 0, i_offset = 0;
+    uint64_t expected_lba;
 
     setup_rwb();
 
     /* Acquire all entries */
-    for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
+
         SPDK_CU_ASSERT_FATAL(entry);
         entry->lba = i;
         ftl_rwb_push(entry);
     }
 
     /* Pop all batches and free them */
-    for (i = 0; i < RWB_ENTRY_COUNT / XFER_SIZE; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
         batch = ftl_rwb_pop(g_rwb);
         SPDK_CU_ASSERT_FATAL(batch);
         entry_count = 0;
 
         ftl_rwb_foreach(entry, batch) {
-            CU_ASSERT_EQUAL(entry->lba, i * XFER_SIZE + entry_count);
+            if (i % g_ut.max_active_batches == 0) {
+                i_offset = i * g_ut.xfer_size;
+            }
+
+            if (entry_count % g_ut.interleave_offset == 0) {
+                i_reset = i % g_ut.max_active_batches +
+                          (entry_count / g_ut.interleave_offset) *
+                          g_ut.max_active_batches;
+            }
+
+            expected_lba = i_offset +
+                           i_reset * g_ut.interleave_offset +
+                           entry_count % g_ut.interleave_offset;
+
+            CU_ASSERT_EQUAL(entry->lba, expected_lba);
             entry_count++;
         }
 
-        CU_ASSERT_EQUAL(entry_count, XFER_SIZE);
+        CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
         ftl_rwb_batch_release(batch);
     }
 
     /* Acquire all entries once more */
-    for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
         SPDK_CU_ASSERT_FATAL(entry);
         ftl_rwb_push(entry);
     }
 
-    /* Pop one batch and check we can acquire XFER_SIZE entries */
-    batch = ftl_rwb_pop(g_rwb);
-    SPDK_CU_ASSERT_FATAL(batch);
-    ftl_rwb_batch_release(batch);
+    /* Pop one group of batches and check we can acquire xfer_size entries per batch */
+    for (i = 0; i < g_ut.max_active_batches; i++) {
+        batch = ftl_rwb_pop(g_rwb);
+        SPDK_CU_ASSERT_FATAL(batch);
+        ftl_rwb_batch_release(batch);
+    }
 
-    for (i = 0; i < XFER_SIZE; ++i) {
+    for (i = 0; i < g_ut.xfer_size * g_ut.max_active_batches; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
+
         SPDK_CU_ASSERT_FATAL(entry);
         ftl_rwb_push(entry);
     }
 
     entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
     CU_ASSERT_PTR_NULL(entry);
+
+    /* Pop and release all batches */
+    for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
+        batch = ftl_rwb_pop(g_rwb);
+        SPDK_CU_ASSERT_FATAL(batch);
+        ftl_rwb_batch_release(batch);
+    }
+
+    cleanup_rwb();
+}
+
+static void
+test_rwb_disable_interleaving(void)
+{
+    struct ftl_rwb_entry *entry;
+    struct ftl_rwb_batch *batch;
+    size_t entry_count, i;
+
+    setup_rwb();
+
+    ftl_rwb_disable_interleaving(g_rwb);
+
+    /* Acquire all entries and assign sequential lbas */
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
+        entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
+
+        SPDK_CU_ASSERT_FATAL(entry);
+        entry->lba = i;
+        ftl_rwb_push(entry);
+    }
+
+    /* Check for expected lbas */
+    for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
+        batch = ftl_rwb_pop(g_rwb);
+        SPDK_CU_ASSERT_FATAL(batch);
+        entry_count = 0;
+
+        ftl_rwb_foreach(entry, batch) {
+            CU_ASSERT_EQUAL(entry->lba, i * g_ut.xfer_size + entry_count);
+            entry_count++;
+        }
+
+        CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
+        ftl_rwb_batch_release(batch);
+    }
+
     cleanup_rwb();
 }
@@ -142,7 +282,7 @@ test_rwb_batch_revert(void)
     size_t i;
 
     setup_rwb();
-    for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
         SPDK_CU_ASSERT_FATAL(entry);
         ftl_rwb_push(entry);
@@ -155,7 +295,7 @@ test_rwb_batch_revert(void)
     ftl_rwb_batch_revert(batch);
 
     /* Verify all of the batches */
-    for (i = 0; i < RWB_ENTRY_COUNT / XFER_SIZE; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
         batch = ftl_rwb_pop(g_rwb);
         CU_ASSERT_PTR_NOT_NULL_FATAL(batch);
     }
@@ -170,7 +310,7 @@ test_rwb_entry_from_offset(void)
     size_t i;
 
     setup_rwb();
-    for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
+    for (i = 0; i < g_ut.max_allocable_entries; ++i) {
         ppa.offset = i;
 
         entry = ftl_rwb_entry_from_offset(g_rwb, i);
@@ -182,12 +322,11 @@ test_rwb_entry_from_offset(void)
 static void *
 test_rwb_worker(void *ctx)
 {
-#define ENTRIES_PER_WORKER (16 * RWB_ENTRY_COUNT)
     struct ftl_rwb_entry *entry;
     unsigned int *num_done = ctx;
     size_t i;
 
-    for (i = 0; i < ENTRIES_PER_WORKER; ++i) {
+    for (i = 0; i < g_ut.num_entries_per_worker; ++i) {
         while (1) {
             entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
             if (entry) {
@@ -251,7 +390,7 @@ test_rwb_parallel(void)
         }
     }
 
-    CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * ENTRIES_PER_WORKER);
+    CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * g_ut.num_entries_per_worker);
     cleanup_rwb();
 }
@@ -282,6 +421,7 @@ test_rwb_limits_set(void)
     size_t i;
 
     setup_rwb();
+
     /* Check valid limits */
     ftl_rwb_get_limits(g_rwb, limits);
     memcpy(check, limits, sizeof(limits));
@@ -307,9 +447,11 @@ test_rwb_limits_applied(void)
     struct ftl_rwb_entry *entry;
     struct ftl_rwb_batch *batch;
     size_t limits[FTL_RWB_TYPE_MAX];
+    const size_t test_limit = g_ut.xfer_size * g_ut.max_active_batches;
     size_t i;
 
     setup_rwb();
+
     /* Check that it's impossible to acquire any entries when the limits are */
     /* set to 0 */
     ftl_rwb_get_limits(g_rwb, limits);
@@ -325,11 +467,10 @@ test_rwb_limits_applied(void)
     CU_ASSERT_PTR_NULL(entry);
 
     /* Check positive limits */
-#define TEST_LIMIT XFER_SIZE
     limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
-    limits[FTL_RWB_TYPE_INTERNAL] = TEST_LIMIT;
+    limits[FTL_RWB_TYPE_INTERNAL] = test_limit;
     ftl_rwb_set_limits(g_rwb, limits);
-    for (i = 0; i < TEST_LIMIT; ++i) {
+    for (i = 0; i < test_limit; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
         SPDK_CU_ASSERT_FATAL(entry);
         entry->flags = FTL_IO_INTERNAL;
@@ -340,20 +481,22 @@ test_rwb_limits_applied(void)
     entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
     CU_ASSERT_PTR_NULL(entry);
 
-    /* Complete the entries and check we can retrieve the entries once again */
-    batch = ftl_rwb_pop(g_rwb);
-    SPDK_CU_ASSERT_FATAL(batch);
-    ftl_rwb_batch_release(batch);
+    for (i = 0; i < test_limit / g_ut.xfer_size; ++i) {
+        /* Complete the entries and check we can retrieve the entries once again */
+        batch = ftl_rwb_pop(g_rwb);
+        SPDK_CU_ASSERT_FATAL(batch);
+        ftl_rwb_batch_release(batch);
+    }
 
     entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
-    CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
+    SPDK_CU_ASSERT_FATAL(entry);
     entry->flags = FTL_IO_INTERNAL;
 
     /* Set the same limit but this time for user entries */
-    limits[FTL_RWB_TYPE_USER] = TEST_LIMIT;
+    limits[FTL_RWB_TYPE_USER] = test_limit;
     limits[FTL_RWB_TYPE_INTERNAL] = ftl_rwb_entry_cnt(g_rwb);
     ftl_rwb_set_limits(g_rwb, limits);
-    for (i = 0; i < TEST_LIMIT; ++i) {
+    for (i = 0; i < test_limit; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
         SPDK_CU_ASSERT_FATAL(entry);
         ftl_rwb_push(entry);
@@ -365,45 +508,72 @@ test_rwb_limits_applied(void)
 
     /* Check that we're still able to acquire a number of internal entries */
     /* while the user entries are being throttled */
-    for (i = 0; i < TEST_LIMIT; ++i) {
+    for (i = 0; i < g_ut.xfer_size; ++i) {
         entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
         SPDK_CU_ASSERT_FATAL(entry);
     }
 
     cleanup_rwb();
 }
 
 int
 main(int argc, char **argv)
 {
-    CU_pSuite suite;
+    CU_pSuite suite1, suite2;
     unsigned int num_failures;
 
     if (CU_initialize_registry() != CUE_SUCCESS) {
         return CU_get_error();
     }
 
-    suite = CU_add_suite("ftl_rwb_suite", NULL, NULL);
-    if (!suite) {
+    suite1 = CU_add_suite("suite1", init_suite1, NULL);
+    if (!suite1) {
+        CU_cleanup_registry();
+        return CU_get_error();
+    }
+
+    suite2 = CU_add_suite("suite2", init_suite2, NULL);
+    if (!suite2) {
         CU_cleanup_registry();
         return CU_get_error();
     }
 
     if (
-        CU_add_test(suite, "test_rwb_acquire",
+        CU_add_test(suite1, "test_rwb_acquire",
                     test_rwb_acquire) == NULL
-        || CU_add_test(suite, "test_rwb_pop",
+        || CU_add_test(suite1, "test_rwb_pop",
                        test_rwb_pop) == NULL
-        || CU_add_test(suite, "test_rwb_batch_revert",
+        || CU_add_test(suite1, "test_rwb_disable_interleaving",
+                       test_rwb_disable_interleaving) == NULL
+        || CU_add_test(suite1, "test_rwb_batch_revert",
                        test_rwb_batch_revert) == NULL
-        || CU_add_test(suite, "test_rwb_entry_from_offset",
+        || CU_add_test(suite1, "test_rwb_entry_from_offset",
                        test_rwb_entry_from_offset) == NULL
-        || CU_add_test(suite, "test_rwb_parallel",
+        || CU_add_test(suite1, "test_rwb_parallel",
                        test_rwb_parallel) == NULL
-        || CU_add_test(suite, "test_rwb_limits_base",
+        || CU_add_test(suite1, "test_rwb_limits_base",
                        test_rwb_limits_base) == NULL
-        || CU_add_test(suite, "test_rwb_limits_set",
+        || CU_add_test(suite1, "test_rwb_limits_set",
                        test_rwb_limits_set) == NULL
-        || CU_add_test(suite, "test_rwb_limits_applied",
+        || CU_add_test(suite1, "test_rwb_limits_applied",
+                       test_rwb_limits_applied) == NULL
+        || CU_add_test(suite2, "test_rwb_acquire",
+                       test_rwb_acquire) == NULL
+        || CU_add_test(suite2, "test_rwb_pop",
+                       test_rwb_pop) == NULL
+        || CU_add_test(suite2, "test_rwb_disable_interleaving",
+                       test_rwb_disable_interleaving) == NULL
+        || CU_add_test(suite2, "test_rwb_batch_revert",
+                       test_rwb_batch_revert) == NULL
+        || CU_add_test(suite2, "test_rwb_entry_from_offset",
+                       test_rwb_entry_from_offset) == NULL
+        || CU_add_test(suite2, "test_rwb_parallel",
+                       test_rwb_parallel) == NULL
+        || CU_add_test(suite2, "test_rwb_limits_base",
+                       test_rwb_limits_base) == NULL
+        || CU_add_test(suite2, "test_rwb_limits_set",
+                       test_rwb_limits_set) == NULL
+        || CU_add_test(suite2, "test_rwb_limits_applied",
                        test_rwb_limits_applied) == NULL
     ) {
         CU_cleanup_registry();