lib/ftl: Change ftl_chunk structure to ftl_zone

This is the starting point for moving the current FTL
implementation, which works on top of the
Open Channel NVMe driver, to work on top of an
abstracted zoned bdev.

This patch changes the name of the ftl_chunk structure
to ftl_zone and starts using zone states from the zoned
bdev interface.

Change-Id: I5429f489cc08a1ac27f09aba3dca4b40ea95eeb3
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/467391
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Wojciech Malikowski 2019-09-04 09:44:28 -04:00 committed by Tomasz Zawadzki
parent 2938dc14b0
commit 77d591e172
15 changed files with 265 additions and 274 deletions

View File

@ -150,10 +150,10 @@ struct spdk_ftl_attrs {
size_t lbk_size; size_t lbk_size;
/* Write buffer cache */ /* Write buffer cache */
struct spdk_bdev_desc *cache_bdev_desc; struct spdk_bdev_desc *cache_bdev_desc;
/* Number of chunks per parallel unit in the underlying device (including any offline ones) */ /* Number of zones per parallel unit in the underlying device (including any offline ones) */
size_t num_chunks; size_t num_zones;
/* Number of sectors per chunk */ /* Number of logical blocks per zone */
size_t chunk_size; size_t zone_size;
/* Device specific configuration */ /* Device specific configuration */
struct spdk_ftl_conf conf; struct spdk_ftl_conf conf;
}; };

View File

@ -137,7 +137,7 @@ ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
break; break;
case FTL_ANM_RANGE_CHK: case FTL_ANM_RANGE_CHK:
case FTL_ANM_RANGE_PU: case FTL_ANM_RANGE_PU:
event->num_lbks = ftl_dev_lbks_in_chunk(dev); event->num_lbks = ftl_dev_lbks_in_zone(dev);
break; break;
default: default:
assert(false); assert(false);
@ -160,9 +160,9 @@ ftl_anm_process_log(struct ftl_anm_poller *poller,
num_bands = range != FTL_ANM_RANGE_PU ? 1 : ftl_dev_num_bands(dev); num_bands = range != FTL_ANM_RANGE_PU ? 1 : ftl_dev_num_bands(dev);
for (i = 0; i < num_bands; ++i) { for (i = 0; i < num_bands; ++i) {
struct ftl_chunk *chk = ftl_band_chunk_from_ppa(&dev->bands[i], ppa); struct ftl_zone *zone = ftl_band_zone_from_ppa(&dev->bands[i], ppa);
if (chk->state == FTL_CHUNK_STATE_BAD) { if (zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE) {
continue; continue;
} }

View File

@ -156,7 +156,7 @@ ftl_band_free_lba_map(struct ftl_band *band)
assert(!band->high_prio); assert(!band->high_prio);
/* Verify that band's metadata is consistent with l2p */ /* Verify that band's metadata is consistent with l2p */
if (band->num_chunks) { if (band->num_zones) {
assert(ftl_band_validate_md(band) == true); assert(ftl_band_validate_md(band) == true);
} }
@ -224,7 +224,7 @@ static void
_ftl_band_set_closed(struct ftl_band *band) _ftl_band_set_closed(struct ftl_band *band)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
struct ftl_chunk *chunk; struct ftl_zone *zone;
/* Set the state as free_md() checks for that */ /* Set the state as free_md() checks for that */
band->state = FTL_BAND_STATE_CLOSED; band->state = FTL_BAND_STATE_CLOSED;
@ -232,10 +232,10 @@ _ftl_band_set_closed(struct ftl_band *band)
/* Free the lba map if there are no outstanding IOs */ /* Free the lba map if there are no outstanding IOs */
ftl_band_release_lba_map(band); ftl_band_release_lba_map(band);
if (spdk_likely(band->num_chunks)) { if (spdk_likely(band->num_zones)) {
LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry); LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) { CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
chunk->state = FTL_CHUNK_STATE_CLOSED; zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
} }
} else { } else {
LIST_REMOVE(band, list_entry); LIST_REMOVE(band, list_entry);
@ -335,7 +335,7 @@ ftl_unpack_tail_md(struct ftl_band *band)
/* /*
* When restoring from a dirty shutdown it's possible old tail meta wasn't yet cleared - * When restoring from a dirty shutdown it's possible old tail meta wasn't yet cleared -
* band had saved head meta, but didn't manage to send erase to all chunks. * band had saved head meta, but didn't manage to send erase to all zones.
* The already found tail md header is valid, but inconsistent with the head meta. Treat * The already found tail md header is valid, but inconsistent with the head meta. Treat
* such a band as open/without valid tail md. * such a band as open/without valid tail md.
*/ */
@ -386,28 +386,28 @@ struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band) ftl_band_tail_md_ppa(struct ftl_band *band)
{ {
struct ftl_ppa ppa = {}; struct ftl_ppa ppa = {};
struct ftl_chunk *chunk; struct ftl_zone *zone;
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
size_t xfer_size = dev->xfer_size; size_t xfer_size = dev->xfer_size;
size_t num_req = ftl_band_tail_md_offset(band) / xfer_size; size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
size_t i; size_t i;
if (spdk_unlikely(!band->num_chunks)) { if (spdk_unlikely(!band->num_zones)) {
return ftl_to_ppa(FTL_PPA_INVALID); return ftl_to_ppa(FTL_PPA_INVALID);
} }
/* Metadata should be aligned to xfer size */ /* Metadata should be aligned to xfer size */
assert(ftl_band_tail_md_offset(band) % xfer_size == 0); assert(ftl_band_tail_md_offset(band) % xfer_size == 0);
chunk = CIRCLEQ_FIRST(&band->chunks); zone = CIRCLEQ_FIRST(&band->zones);
for (i = 0; i < num_req % band->num_chunks; ++i) { for (i = 0; i < num_req % band->num_zones; ++i) {
chunk = ftl_band_next_chunk(band, chunk); zone = ftl_band_next_zone(band, zone);
} }
ppa.lbk = (num_req / band->num_chunks) * xfer_size; ppa.lbk = (num_req / band->num_zones) * xfer_size;
ppa.chk = band->id; ppa.chk = band->id;
ppa.pu = chunk->punit->start_ppa.pu; ppa.pu = zone->punit->start_ppa.pu;
ppa.grp = chunk->punit->start_ppa.grp; ppa.grp = zone->punit->start_ppa.grp;
return ppa; return ppa;
} }
@ -417,11 +417,11 @@ ftl_band_head_md_ppa(struct ftl_band *band)
{ {
struct ftl_ppa ppa; struct ftl_ppa ppa;
if (spdk_unlikely(!band->num_chunks)) { if (spdk_unlikely(!band->num_zones)) {
return ftl_to_ppa(FTL_PPA_INVALID); return ftl_to_ppa(FTL_PPA_INVALID);
} }
ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa; ppa = CIRCLEQ_FIRST(&band->zones)->punit->start_ppa;
ppa.chk = band->id; ppa.chk = band->id;
return ppa; return ppa;
@ -482,7 +482,7 @@ ftl_band_age(const struct ftl_band *band)
size_t size_t
ftl_band_num_usable_lbks(const struct ftl_band *band) ftl_band_num_usable_lbks(const struct ftl_band *band)
{ {
return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev); return band->num_zones * ftl_dev_lbks_in_zone(band->dev);
} }
size_t size_t
@ -516,8 +516,8 @@ ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
return &dev->bands[ppa.chk]; return &dev->bands[ppa.chk];
} }
struct ftl_chunk * struct ftl_zone *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa) ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
unsigned int punit; unsigned int punit;
@ -525,7 +525,7 @@ ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
punit = ftl_ppa_flatten_punit(dev, ppa); punit = ftl_ppa_flatten_punit(dev, ppa);
assert(punit < ftl_dev_num_punits(dev)); assert(punit < ftl_dev_num_punits(dev));
return &band->chunk_buf[punit]; return &band->zone_buf[punit];
} }
uint64_t uint64_t
@ -537,67 +537,67 @@ ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
punit = ftl_ppa_flatten_punit(dev, ppa); punit = ftl_ppa_flatten_punit(dev, ppa);
assert(ppa.chk == band->id); assert(ppa.chk == band->id);
return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk; return punit * ftl_dev_lbks_in_zone(dev) + ppa.lbk;
} }
struct ftl_ppa struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks) ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
struct ftl_chunk *chunk; struct ftl_zone *zone;
unsigned int punit_num; unsigned int punit_num;
size_t num_xfers, num_stripes; size_t num_xfers, num_stripes;
assert(ppa.chk == band->id); assert(ppa.chk == band->id);
punit_num = ftl_ppa_flatten_punit(dev, ppa); punit_num = ftl_ppa_flatten_punit(dev, ppa);
chunk = &band->chunk_buf[punit_num]; zone = &band->zone_buf[punit_num];
num_lbks += (ppa.lbk % dev->xfer_size); num_lbks += (ppa.lbk % dev->xfer_size);
ppa.lbk -= (ppa.lbk % dev->xfer_size); ppa.lbk -= (ppa.lbk % dev->xfer_size);
#if defined(DEBUG) #if defined(DEBUG)
/* Check that the number of chunks has not been changed */ /* Check that the number of zones has not been changed */
struct ftl_chunk *_chunk; struct ftl_zone *_zone;
size_t _num_chunks = 0; size_t _num_zones = 0;
CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) { CIRCLEQ_FOREACH(_zone, &band->zones, circleq) {
if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) { if (spdk_likely(_zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
_num_chunks++; _num_zones++;
} }
} }
assert(band->num_chunks == _num_chunks); assert(band->num_zones == _num_zones);
#endif #endif
assert(band->num_chunks != 0); assert(band->num_zones != 0);
num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks; num_stripes = (num_lbks / dev->xfer_size) / band->num_zones;
ppa.lbk += num_stripes * dev->xfer_size; ppa.lbk += num_stripes * dev->xfer_size;
num_lbks -= num_stripes * dev->xfer_size * band->num_chunks; num_lbks -= num_stripes * dev->xfer_size * band->num_zones;
if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) { if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
return ftl_to_ppa(FTL_PPA_INVALID); return ftl_to_ppa(FTL_PPA_INVALID);
} }
num_xfers = num_lbks / dev->xfer_size; num_xfers = num_lbks / dev->xfer_size;
for (size_t i = 0; i < num_xfers; ++i) { for (size_t i = 0; i < num_xfers; ++i) {
/* When the last chunk is reached the lbk part of the address */ /* When the last zone is reached the lbk part of the address */
/* needs to be increased by xfer_size */ /* needs to be increased by xfer_size */
if (ftl_band_chunk_is_last(band, chunk)) { if (ftl_band_zone_is_last(band, zone)) {
ppa.lbk += dev->xfer_size; ppa.lbk += dev->xfer_size;
if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) { if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
return ftl_to_ppa(FTL_PPA_INVALID); return ftl_to_ppa(FTL_PPA_INVALID);
} }
} }
chunk = ftl_band_next_operational_chunk(band, chunk); zone = ftl_band_next_operational_zone(band, zone);
assert(chunk); assert(zone);
ppa.grp = chunk->start_ppa.grp; ppa.grp = zone->start_ppa.grp;
ppa.pu = chunk->start_ppa.pu; ppa.pu = zone->start_ppa.pu;
num_lbks -= dev->xfer_size; num_lbks -= dev->xfer_size;
} }
if (num_lbks) { if (num_lbks) {
ppa.lbk += num_lbks; ppa.lbk += num_lbks;
if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) { if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
return ftl_to_ppa(FTL_PPA_INVALID); return ftl_to_ppa(FTL_PPA_INVALID);
} }
} }
@ -608,18 +608,18 @@ ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbk
static size_t static size_t
ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa) ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{ {
struct ftl_chunk *chunk, *current_chunk; struct ftl_zone *zone, *current_zone;
unsigned int punit_offset = 0; unsigned int punit_offset = 0;
size_t off, num_stripes, xfer_size = band->dev->xfer_size; size_t off, num_stripes, xfer_size = band->dev->xfer_size;
assert(ppa.chk == band->id); assert(ppa.chk == band->id);
num_stripes = (ppa.lbk / xfer_size) * band->num_chunks; num_stripes = (ppa.lbk / xfer_size) * band->num_zones;
off = ppa.lbk % xfer_size; off = ppa.lbk % xfer_size;
current_chunk = ftl_band_chunk_from_ppa(band, ppa); current_zone = ftl_band_zone_from_ppa(band, ppa);
CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) { CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
if (current_chunk == chunk) { if (current_zone == zone) {
break; break;
} }
punit_offset++; punit_offset++;
@ -635,9 +635,9 @@ ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
uint64_t punit; uint64_t punit;
punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin; punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev); ppa.lbk = lbkoff % ftl_dev_lbks_in_zone(dev);
ppa.chk = band->id; ppa.chk = band->id;
ppa.pu = punit / dev->geo.num_grp; ppa.pu = punit / dev->geo.num_grp;
ppa.grp = punit % dev->geo.num_grp; ppa.grp = punit % dev->geo.num_grp;
@ -823,7 +823,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
struct ftl_md_io *io; struct ftl_md_io *io;
if (spdk_unlikely(!band->num_chunks)) { if (spdk_unlikely(!band->num_zones)) {
return -ENOENT; return -ENOENT;
} }
@ -1036,46 +1036,46 @@ ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
} }
static void static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk) ftl_band_remove_zone(struct ftl_band *band, struct ftl_zone *zone)
{ {
CIRCLEQ_REMOVE(&band->chunks, chunk, circleq); CIRCLEQ_REMOVE(&band->zones, zone, circleq);
band->num_chunks--; band->num_zones--;
} }
static void static void
ftl_erase_fail(struct ftl_io *io, int status) ftl_erase_fail(struct ftl_io *io, int status)
{ {
struct ftl_chunk *chunk; struct ftl_zone *zone;
struct ftl_band *band = io->band; struct ftl_band *band = io->band;
char buf[128]; char buf[128];
SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n", SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
ftl_ppa2str(io->ppa, buf, sizeof(buf)), status); ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);
chunk = ftl_band_chunk_from_ppa(band, io->ppa); zone = ftl_band_zone_from_ppa(band, io->ppa);
chunk->state = FTL_CHUNK_STATE_BAD; zone->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
ftl_band_remove_chunk(band, chunk); ftl_band_remove_zone(band, zone);
band->tail_md_ppa = ftl_band_tail_md_ppa(band); band->tail_md_ppa = ftl_band_tail_md_ppa(band);
} }
static void static void
ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status) ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status)
{ {
struct ftl_chunk *chunk; struct ftl_zone *zone;
if (spdk_unlikely(status)) { if (spdk_unlikely(status)) {
ftl_erase_fail(io, status); ftl_erase_fail(io, status);
return; return;
} }
chunk = ftl_band_chunk_from_ppa(io->band, io->ppa); zone = ftl_band_zone_from_ppa(io->band, io->ppa);
chunk->state = FTL_CHUNK_STATE_FREE; zone->state = SPDK_BDEV_ZONE_STATE_EMPTY;
chunk->write_offset = 0; zone->write_offset = 0;
} }
int int
ftl_band_erase(struct ftl_band *band) ftl_band_erase(struct ftl_band *band)
{ {
struct ftl_chunk *chunk; struct ftl_zone *zone;
struct ftl_io *io; struct ftl_io *io;
int rc = 0; int rc = 0;
@ -1084,8 +1084,8 @@ ftl_band_erase(struct ftl_band *band)
ftl_band_set_state(band, FTL_BAND_STATE_PREP); ftl_band_set_state(band, FTL_BAND_STATE_PREP);
CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) { CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
if (chunk->state == FTL_CHUNK_STATE_FREE) { if (zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) {
continue; continue;
} }
@ -1095,7 +1095,7 @@ ftl_band_erase(struct ftl_band *band)
break; break;
} }
io->ppa = chunk->start_ppa; io->ppa = zone->start_ppa;
rc = ftl_io_erase(io); rc = ftl_io_erase(io);
if (rc) { if (rc) {
assert(0); assert(0);
@ -1120,27 +1120,27 @@ ftl_band_write_prep(struct ftl_band *band)
return 0; return 0;
} }
struct ftl_chunk * struct ftl_zone *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk) ftl_band_next_operational_zone(struct ftl_band *band, struct ftl_zone *zone)
{ {
struct ftl_chunk *result = NULL; struct ftl_zone *result = NULL;
struct ftl_chunk *entry; struct ftl_zone *entry;
if (spdk_unlikely(!band->num_chunks)) { if (spdk_unlikely(!band->num_zones)) {
return NULL; return NULL;
} }
/* Erasing band may fail after it was assigned to wptr. */ /* Erasing band may fail after it was assigned to wptr. */
/* In such a case chunk is no longer in band->chunks queue. */ /* In such a case zone is no longer in band->zones queue. */
if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) { if (spdk_likely(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
result = ftl_band_next_chunk(band, chunk); result = ftl_band_next_zone(band, zone);
} else { } else {
CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) { CIRCLEQ_FOREACH_REVERSE(entry, &band->zones, circleq) {
if (entry->pos > chunk->pos) { if (entry->pos > zone->pos) {
result = entry; result = entry;
} else { } else {
if (!result) { if (!result) {
result = CIRCLEQ_FIRST(&band->chunks); result = CIRCLEQ_FIRST(&band->zones);
} }
break; break;
} }

View File

@ -37,6 +37,7 @@
#include "spdk/stdinc.h" #include "spdk/stdinc.h"
#include "spdk/bit_array.h" #include "spdk/bit_array.h"
#include "spdk/queue.h" #include "spdk/queue.h"
#include "spdk/bdev_zone.h"
#include "ftl_io.h" #include "ftl_io.h"
#include "ftl_ppa.h" #include "ftl_ppa.h"
@ -48,17 +49,9 @@
struct spdk_ftl_dev; struct spdk_ftl_dev;
struct ftl_lba_map_request; struct ftl_lba_map_request;
enum ftl_chunk_state { struct ftl_zone {
FTL_CHUNK_STATE_FREE, /* Zone state */
FTL_CHUNK_STATE_OPEN, enum spdk_bdev_zone_state state;
FTL_CHUNK_STATE_CLOSED,
FTL_CHUNK_STATE_BAD,
FTL_CHUNK_STATE_VACANT,
};
struct ftl_chunk {
/* Block state */
enum ftl_chunk_state state;
/* Indicates that there is inflight write */ /* Indicates that there is inflight write */
bool busy; bool busy;
@ -72,10 +65,10 @@ struct ftl_chunk {
/* Pointer to parallel unit */ /* Pointer to parallel unit */
struct ftl_punit *punit; struct ftl_punit *punit;
/* Position in band's chunk_buf */ /* Position in band's zone_buf */
uint32_t pos; uint32_t pos;
CIRCLEQ_ENTRY(ftl_chunk) circleq; CIRCLEQ_ENTRY(ftl_zone) circleq;
}; };
enum ftl_md_status { enum ftl_md_status {
@ -154,14 +147,14 @@ struct ftl_band {
/* Device this band belongs to */ /* Device this band belongs to */
struct spdk_ftl_dev *dev; struct spdk_ftl_dev *dev;
/* Number of operational chunks */ /* Number of operational zones */
size_t num_chunks; size_t num_zones;
/* Array of chunks */ /* Array of zones */
struct ftl_chunk *chunk_buf; struct ftl_zone *zone_buf;
/* List of operational chunks */ /* List of operational zones */
CIRCLEQ_HEAD(, ftl_chunk) chunks; CIRCLEQ_HEAD(, ftl_zone) zones;
/* LBA map */ /* LBA map */
struct ftl_lba_map lba_map; struct ftl_lba_map lba_map;
@ -223,7 +216,7 @@ size_t ftl_band_user_lbks(const struct ftl_band *band);
void ftl_band_set_addr(struct ftl_band *band, uint64_t lba, void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
struct ftl_ppa ppa); struct ftl_ppa ppa);
struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa); struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
struct ftl_chunk *ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa); struct ftl_zone *ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa);
void ftl_band_md_clear(struct ftl_band *band); void ftl_band_md_clear(struct ftl_band *band);
int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa, int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa,
ftl_io_fn cb_fn, void *cb_ctx); ftl_io_fn cb_fn, void *cb_ctx);
@ -236,8 +229,8 @@ void ftl_band_write_failed(struct ftl_band *band);
int ftl_band_full(struct ftl_band *band, size_t offset); int ftl_band_full(struct ftl_band *band, size_t offset);
int ftl_band_erase(struct ftl_band *band); int ftl_band_erase(struct ftl_band *band);
int ftl_band_write_prep(struct ftl_band *band); int ftl_band_write_prep(struct ftl_band *band);
struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_zone *ftl_band_next_operational_zone(struct ftl_band *band,
struct ftl_chunk *chunk); struct ftl_zone *zone);
size_t ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev); size_t ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);
static inline int static inline int
@ -246,11 +239,11 @@ ftl_band_empty(const struct ftl_band *band)
return band->lba_map.num_vld == 0; return band->lba_map.num_vld == 0;
} }
static inline struct ftl_chunk * static inline struct ftl_zone *
ftl_band_next_chunk(struct ftl_band *band, struct ftl_chunk *chunk) ftl_band_next_zone(struct ftl_band *band, struct ftl_zone *zone)
{ {
assert(chunk->state != FTL_CHUNK_STATE_BAD); assert(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE);
return CIRCLEQ_LOOP_NEXT(&band->chunks, chunk, circleq); return CIRCLEQ_LOOP_NEXT(&band->zones, zone, circleq);
} }
static inline void static inline void
@ -282,23 +275,23 @@ ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
} }
static inline int static inline int
ftl_band_chunk_is_last(struct ftl_band *band, struct ftl_chunk *chunk) ftl_band_zone_is_last(struct ftl_band *band, struct ftl_zone *zone)
{ {
return chunk == CIRCLEQ_LAST(&band->chunks); return zone == CIRCLEQ_LAST(&band->zones);
} }
static inline int static inline int
ftl_band_chunk_is_first(struct ftl_band *band, struct ftl_chunk *chunk) ftl_band_zone_is_first(struct ftl_band *band, struct ftl_zone *zone)
{ {
return chunk == CIRCLEQ_FIRST(&band->chunks); return zone == CIRCLEQ_FIRST(&band->zones);
} }
static inline int static inline int
ftl_chunk_is_writable(const struct ftl_chunk *chunk) ftl_zone_is_writable(const struct ftl_zone *zone)
{ {
return (chunk->state == FTL_CHUNK_STATE_OPEN || return (zone->state == SPDK_BDEV_ZONE_STATE_OPEN ||
chunk->state == FTL_CHUNK_STATE_FREE) && zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) &&
!chunk->busy; !zone->busy;
} }
#endif /* FTL_BAND_H */ #endif /* FTL_BAND_H */

View File

@ -74,8 +74,8 @@ struct ftl_wptr {
/* Current logical block's offset */ /* Current logical block's offset */
uint64_t offset; uint64_t offset;
/* Current erase block */ /* Current zone */
struct ftl_chunk *chunk; struct ftl_zone *zone;
/* Pending IO queue */ /* Pending IO queue */
TAILQ_HEAD(, ftl_io) pending_queue; TAILQ_HEAD(, ftl_io) pending_queue;
@ -295,7 +295,7 @@ ftl_ppa_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
assert(!ftl_ppa_invalid(*ppa)); assert(!ftl_ppa_invalid(*ppa));
/* Metadata has to be read in the way it's written (jumping across */ /* Metadata has to be read in the way it's written (jumping across */
/* the chunks in xfer_size increments) */ /* the zones in xfer_size increments) */
if (io->flags & FTL_IO_MD) { if (io->flags & FTL_IO_MD) {
max_lbks = dev->xfer_size - (ppa->lbk % dev->xfer_size); max_lbks = dev->xfer_size - (ppa->lbk % dev->xfer_size);
lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks); lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);
@ -322,7 +322,7 @@ ftl_wptr_open_band(struct ftl_wptr *wptr)
{ {
struct ftl_band *band = wptr->band; struct ftl_band *band = wptr->band;
assert(ftl_band_chunk_is_first(band, wptr->chunk)); assert(ftl_band_zone_is_first(band, wptr->zone));
assert(band->lba_map.num_vld == 0); assert(band->lba_map.num_vld == 0);
ftl_band_clear_lba_map(band); ftl_band_clear_lba_map(band);
@ -339,17 +339,16 @@ ftl_submit_erase(struct ftl_io *io)
struct spdk_ftl_dev *dev = io->dev; struct spdk_ftl_dev *dev = io->dev;
struct ftl_band *band = io->band; struct ftl_band *band = io->band;
struct ftl_ppa ppa = io->ppa; struct ftl_ppa ppa = io->ppa;
struct ftl_chunk *chunk; struct ftl_zone *zone;
uint64_t ppa_packed; uint64_t ppa_packed;
int rc = 0; int rc = 0;
size_t i; size_t i;
for (i = 0; i < io->lbk_cnt; ++i) { for (i = 0; i < io->lbk_cnt; ++i) {
if (i != 0) { if (i != 0) {
chunk = ftl_band_next_chunk(band, ftl_band_chunk_from_ppa(band, ppa)); zone = ftl_band_next_zone(band, ftl_band_zone_from_ppa(band, ppa));
assert(chunk->state == FTL_CHUNK_STATE_CLOSED || assert(zone->state == SPDK_BDEV_ZONE_STATE_CLOSED);
chunk->state == FTL_CHUNK_STATE_VACANT); ppa = zone->start_ppa;
ppa = chunk->start_ppa;
} }
assert(ppa.lbk == 0); assert(ppa.lbk == 0);
@ -474,8 +473,8 @@ ftl_wptr_init(struct ftl_band *band)
wptr->dev = dev; wptr->dev = dev;
wptr->band = band; wptr->band = band;
wptr->chunk = CIRCLEQ_FIRST(&band->chunks); wptr->zone = CIRCLEQ_FIRST(&band->zones);
wptr->ppa = wptr->chunk->start_ppa; wptr->ppa = wptr->zone->start_ppa;
TAILQ_INIT(&wptr->pending_queue); TAILQ_INIT(&wptr->pending_queue);
return wptr; return wptr;
@ -580,13 +579,13 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
ftl_band_set_state(band, FTL_BAND_STATE_FULL); ftl_band_set_state(band, FTL_BAND_STATE_FULL);
} }
wptr->chunk->busy = true; wptr->zone->busy = true;
wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size); wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size);
wptr->chunk = ftl_band_next_operational_chunk(band, wptr->chunk); wptr->zone = ftl_band_next_operational_zone(band, wptr->zone);
assert(!ftl_ppa_invalid(wptr->ppa)); assert(!ftl_ppa_invalid(wptr->ppa));
SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d chunk:%d, lbk:%u\n", SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d zone:%d, lbk:%u\n",
wptr->ppa.grp, wptr->ppa.pu, wptr->ppa.chk, wptr->ppa.lbk); wptr->ppa.grp, wptr->ppa.pu, wptr->ppa.chk, wptr->ppa.lbk);
if (wptr->offset >= next_thld && !dev->next_band) { if (wptr->offset >= next_thld && !dev->next_band) {
@ -607,9 +606,9 @@ ftl_wptr_ready(struct ftl_wptr *wptr)
/* TODO: add handling of empty bands */ /* TODO: add handling of empty bands */
if (spdk_unlikely(!ftl_chunk_is_writable(wptr->chunk))) { if (spdk_unlikely(!ftl_zone_is_writable(wptr->zone))) {
/* Erasing band may fail after it was assigned to wptr. */ /* Erasing band may fail after it was assigned to wptr. */
if (spdk_unlikely(wptr->chunk->state == FTL_CHUNK_STATE_BAD)) { if (spdk_unlikely(wptr->zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE)) {
ftl_wptr_advance(wptr, wptr->dev->xfer_size); ftl_wptr_advance(wptr, wptr->dev->xfer_size);
} }
return 0; return 0;
@ -1477,14 +1476,14 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_ppa ppa,
static void static void
ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status) ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
{ {
struct ftl_chunk *chunk; struct ftl_zone *zone;
struct ftl_wptr *wptr; struct ftl_wptr *wptr;
chunk = ftl_band_chunk_from_ppa(io->band, io->ppa); zone = ftl_band_zone_from_ppa(io->band, io->ppa);
wptr = ftl_wptr_from_band(io->band); wptr = ftl_wptr_from_band(io->band);
chunk->busy = false; zone->busy = false;
chunk->write_offset += io->lbk_cnt; zone->write_offset += io->lbk_cnt;
/* If some other write on the same band failed the write pointer would already be freed */ /* If some other write on the same band failed the write pointer would already be freed */
if (spdk_likely(wptr)) { if (spdk_likely(wptr)) {
@ -1508,7 +1507,7 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
ppa = io->ppa; ppa = io->ppa;
} }
/* Split IO to child requests and release chunk immediately after child is completed */ /* Split IO to child requests and release zone immediately after child is completed */
child = ftl_io_init_child_write(io, ppa, ftl_io_iovec_addr(io), child = ftl_io_init_child_write(io, ppa, ftl_io_iovec_addr(io),
ftl_io_get_md(io), ftl_io_child_write_cb); ftl_io_get_md(io), ftl_io_child_write_cb);
if (!child) { if (!child) {
@ -1545,8 +1544,8 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
while (io->iov_pos < io->iov_cnt) { while (io->iov_pos < io->iov_cnt) {
/* There are no guarantees of the order of completion of NVMe IO submission queue */ /* There are no guarantees of the order of completion of NVMe IO submission queue */
/* so wait until chunk is not busy before submitting another write */ /* so wait until zone is not busy before submitting another write */
if (wptr->chunk->busy) { if (wptr->zone->busy) {
TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry); TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry);
rc = -EAGAIN; rc = -EAGAIN;
break; break;
@ -1888,8 +1887,8 @@ spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *at
attrs->lbk_size = FTL_BLOCK_SIZE; attrs->lbk_size = FTL_BLOCK_SIZE;
attrs->range = dev->range; attrs->range = dev->range;
attrs->cache_bdev_desc = dev->nv_cache.bdev_desc; attrs->cache_bdev_desc = dev->nv_cache.bdev_desc;
attrs->num_chunks = dev->geo.num_chk; attrs->num_zones = dev->geo.num_chk;
attrs->chunk_size = dev->geo.clba; attrs->zone_size = dev->geo.clba;
attrs->conf = dev->conf; attrs->conf = dev->conf;
} }
@ -2150,9 +2149,9 @@ ftl_process_anm_event(struct ftl_anm_event *event)
bool bool
ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa) ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa)
{ {
struct ftl_chunk *chunk = ftl_band_chunk_from_ppa(band, ppa); struct ftl_zone *zone = ftl_band_zone_from_ppa(band, ppa);
return ppa.lbk < chunk->write_offset; return ppa.lbk < zone->write_offset;
} }
static void static void

View File

@ -52,7 +52,7 @@
struct spdk_ftl_dev; struct spdk_ftl_dev;
struct ftl_band; struct ftl_band;
struct ftl_chunk; struct ftl_zone;
struct ftl_io; struct ftl_io;
struct ftl_restore; struct ftl_restore;
struct ftl_wptr; struct ftl_wptr;
@ -484,7 +484,7 @@ ftl_dev_num_bands(const struct spdk_ftl_dev *dev)
} }
static inline size_t static inline size_t
ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev) ftl_dev_lbks_in_zone(const struct spdk_ftl_dev *dev)
{ {
return dev->geo.clba; return dev->geo.clba;
} }
@ -498,7 +498,7 @@ ftl_dev_num_punits(const struct spdk_ftl_dev *dev)
static inline uint64_t static inline uint64_t
ftl_num_band_lbks(const struct spdk_ftl_dev *dev) ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
{ {
return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev); return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_zone(dev);
} }
static inline size_t static inline size_t

View File

@ -107,17 +107,17 @@ ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
continue; continue;
} }
if (!dev->bands[i].num_chunks) { if (!dev->bands[i].num_zones) {
ftl_debug(" Band %3zu: all chunks are offline\n", i + 1); ftl_debug(" Band %3zu: all zones are offline\n", i + 1);
continue; continue;
} }
total += dev->bands[i].lba_map.num_vld; total += dev->bands[i].lba_map.num_vld;
ftl_debug(" Band %3zu: %8zu / %zu \tnum_chunks: %zu \twr_cnt: %"PRIu64"\tmerit:" ftl_debug(" Band %3zu: %8zu / %zu \tnum_zones: %zu \twr_cnt: %"PRIu64"\tmerit:"
"%10.3f\tstate: %s\n", "%10.3f\tstate: %s\n",
i + 1, dev->bands[i].lba_map.num_vld, i + 1, dev->bands[i].lba_map.num_vld,
ftl_band_user_lbks(&dev->bands[i]), ftl_band_user_lbks(&dev->bands[i]),
dev->bands[i].num_chunks, dev->bands[i].num_zones,
dev->bands[i].wr_cnt, dev->bands[i].wr_cnt,
dev->bands[i].merit, dev->bands[i].merit,
ftl_band_state_str[dev->bands[i].state]); ftl_band_state_str[dev->bands[i].state]);

View File

@ -40,6 +40,7 @@
#include "spdk/ftl.h" #include "spdk/ftl.h"
#include "spdk/likely.h" #include "spdk/likely.h"
#include "spdk/string.h" #include "spdk/string.h"
#include "spdk/bdev_zone.h"
#include "ftl_core.h" #include "ftl_core.h"
#include "ftl_anm.h" #include "ftl_anm.h"
@ -259,26 +260,26 @@ ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *
} }
static unsigned char static unsigned char
ftl_get_chunk_state(const struct spdk_ocssd_chunk_information_entry *info) ftl_get_zone_state(const struct spdk_ocssd_chunk_information_entry *info)
{ {
if (info->cs.free) { if (info->cs.free) {
return FTL_CHUNK_STATE_FREE; return SPDK_BDEV_ZONE_STATE_EMPTY;
} }
if (info->cs.open) { if (info->cs.open) {
return FTL_CHUNK_STATE_OPEN; return SPDK_BDEV_ZONE_STATE_OPEN;
} }
if (info->cs.closed) { if (info->cs.closed) {
return FTL_CHUNK_STATE_CLOSED; return SPDK_BDEV_ZONE_STATE_CLOSED;
} }
if (info->cs.offline) { if (info->cs.offline) {
return FTL_CHUNK_STATE_BAD; return SPDK_BDEV_ZONE_STATE_OFFLINE;
} }
assert(0 && "Invalid block state"); assert(0 && "Invalid block state");
return FTL_CHUNK_STATE_BAD; return SPDK_BDEV_ZONE_STATE_OFFLINE;
} }
static void static void
@ -289,7 +290,7 @@ ftl_remove_empty_bands(struct spdk_ftl_dev *dev)
/* Remove band from shut_bands list to prevent further processing */ /* Remove band from shut_bands list to prevent further processing */
/* if all blocks on this band are bad */ /* if all blocks on this band are bad */
LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) { LIST_FOREACH_SAFE(band, &dev->shut_bands, list_entry, temp_band) {
if (!band->num_chunks) { if (!band->num_zones) {
dev->num_bands--; dev->num_bands--;
LIST_REMOVE(band, list_entry); LIST_REMOVE(band, list_entry);
} }
@ -302,7 +303,7 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
struct spdk_ocssd_chunk_information_entry *info; struct spdk_ocssd_chunk_information_entry *info;
struct ftl_band *band, *pband; struct ftl_band *band, *pband;
struct ftl_punit *punit; struct ftl_punit *punit;
struct ftl_chunk *chunk; struct ftl_zone *zone;
unsigned int i, j; unsigned int i, j;
char buf[128]; char buf[128];
int rc = 0; int rc = 0;
@ -335,9 +336,9 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
} }
pband = band; pband = band;
CIRCLEQ_INIT(&band->chunks); CIRCLEQ_INIT(&band->zones);
band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf)); band->zone_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->zone_buf));
if (!band->chunk_buf) { if (!band->zone_buf) {
SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i); SPDK_ERRLOG("Failed to allocate block state table for band: [%u]\n", i);
rc = -1; rc = -1;
goto out; goto out;
@ -369,17 +370,17 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
for (j = 0; j < ftl_dev_num_bands(dev); ++j) { for (j = 0; j < ftl_dev_num_bands(dev); ++j) {
band = &dev->bands[j]; band = &dev->bands[j];
chunk = &band->chunk_buf[i]; zone = &band->zone_buf[i];
chunk->pos = i; zone->pos = i;
chunk->state = ftl_get_chunk_state(&info[j]); zone->state = ftl_get_zone_state(&info[j]);
chunk->punit = punit; zone->punit = punit;
chunk->start_ppa = punit->start_ppa; zone->start_ppa = punit->start_ppa;
chunk->start_ppa.chk = band->id; zone->start_ppa.chk = band->id;
chunk->write_offset = ftl_dev_lbks_in_chunk(dev); zone->write_offset = ftl_dev_lbks_in_zone(dev);
if (chunk->state != FTL_CHUNK_STATE_BAD) { if (zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE) {
band->num_chunks++; band->num_zones++;
CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq); CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
} }
} }
} }
@ -693,7 +694,7 @@ ftl_init_num_free_bands(struct spdk_ftl_dev *dev)
int cnt = 0; int cnt = 0;
LIST_FOREACH(band, &dev->shut_bands, list_entry) { LIST_FOREACH(band, &dev->shut_bands, list_entry) {
if (band->num_chunks && !band->lba_map.num_vld) { if (band->num_zones && !band->lba_map.num_vld) {
cnt++; cnt++;
} }
} }
@ -1246,7 +1247,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
if (dev->bands) { if (dev->bands) {
for (i = 0; i < ftl_dev_num_bands(dev); ++i) { for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
free(dev->bands[i].chunk_buf); free(dev->bands[i].zone_buf);
spdk_bit_array_free(&dev->bands[i].lba_map.vld); spdk_bit_array_free(&dev->bands[i].lba_map.vld);
spdk_bit_array_free(&dev->bands[i].reloc_bitmap); spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
} }

View File

@ -100,11 +100,11 @@ struct ftl_band_reloc {
/* Reloc map iterator */ /* Reloc map iterator */
struct { struct {
/* Array of chunk offsets */ /* Array of zone offsets */
size_t *chk_offset; size_t *zone_offset;
/* Currently chunk */ /* Current zone */
size_t chk_current; size_t zone_current;
} iter; } iter;
/* Number of outstanding moves */ /* Number of outstanding moves */
@ -160,19 +160,19 @@ ftl_reloc_is_defrag_active(const struct ftl_reloc *reloc)
} }
static size_t static size_t
ftl_reloc_iter_chk_offset(struct ftl_band_reloc *breloc) ftl_reloc_iter_zone_offset(struct ftl_band_reloc *breloc)
{ {
size_t chunk = breloc->iter.chk_current; size_t zone = breloc->iter.zone_current;
return breloc->iter.chk_offset[chunk]; return breloc->iter.zone_offset[zone];
} }
static size_t static size_t
ftl_reloc_iter_chk_done(struct ftl_band_reloc *breloc) ftl_reloc_iter_zone_done(struct ftl_band_reloc *breloc)
{ {
size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev); size_t num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
return ftl_reloc_iter_chk_offset(breloc) == num_lbks; return ftl_reloc_iter_zone_offset(breloc) == num_lbks;
} }
static void static void
@ -298,25 +298,25 @@ ftl_reloc_read_cb(struct ftl_io *io, void *arg, int status)
static void static void
ftl_reloc_iter_reset(struct ftl_band_reloc *breloc) ftl_reloc_iter_reset(struct ftl_band_reloc *breloc)
{ {
memset(breloc->iter.chk_offset, 0, ftl_dev_num_punits(breloc->band->dev) * memset(breloc->iter.zone_offset, 0, ftl_dev_num_punits(breloc->band->dev) *
sizeof(*breloc->iter.chk_offset)); sizeof(*breloc->iter.zone_offset));
breloc->iter.chk_current = 0; breloc->iter.zone_current = 0;
} }
static size_t static size_t
ftl_reloc_iter_lbkoff(struct ftl_band_reloc *breloc) ftl_reloc_iter_lbkoff(struct ftl_band_reloc *breloc)
{ {
size_t chk_offset = breloc->iter.chk_current * ftl_dev_lbks_in_chunk(breloc->parent->dev); size_t zone_offset = breloc->iter.zone_current * ftl_dev_lbks_in_zone(breloc->parent->dev);
return breloc->iter.chk_offset[breloc->iter.chk_current] + chk_offset; return breloc->iter.zone_offset[breloc->iter.zone_current] + zone_offset;
} }
static void static void
ftl_reloc_iter_next_chk(struct ftl_band_reloc *breloc) ftl_reloc_iter_next_zone(struct ftl_band_reloc *breloc)
{ {
size_t num_chk = ftl_dev_num_punits(breloc->band->dev); size_t num_zones = ftl_dev_num_punits(breloc->band->dev);
breloc->iter.chk_current = (breloc->iter.chk_current + 1) % num_chk; breloc->iter.zone_current = (breloc->iter.zone_current + 1) % num_zones;
} }
static int static int
@ -332,15 +332,15 @@ ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff)
static int static int
ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff) ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
{ {
size_t chunk = breloc->iter.chk_current; size_t zone = breloc->iter.zone_current;
*lbkoff = ftl_reloc_iter_lbkoff(breloc); *lbkoff = ftl_reloc_iter_lbkoff(breloc);
if (ftl_reloc_iter_chk_done(breloc)) { if (ftl_reloc_iter_zone_done(breloc)) {
return 0; return 0;
} }
breloc->iter.chk_offset[chunk]++; breloc->iter.zone_offset[zone]++;
if (!ftl_reloc_lbk_valid(breloc, *lbkoff)) { if (!ftl_reloc_lbk_valid(breloc, *lbkoff)) {
ftl_reloc_clr_lbk(breloc, *lbkoff); ftl_reloc_clr_lbk(breloc, *lbkoff);
@ -353,9 +353,9 @@ ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *lbkoff)
static int static int
ftl_reloc_first_valid_lbk(struct ftl_band_reloc *breloc, size_t *lbkoff) ftl_reloc_first_valid_lbk(struct ftl_band_reloc *breloc, size_t *lbkoff)
{ {
size_t i, num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev); size_t i, num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
for (i = ftl_reloc_iter_chk_offset(breloc); i < num_lbks; ++i) { for (i = ftl_reloc_iter_zone_offset(breloc); i < num_lbks; ++i) {
if (ftl_reloc_iter_next(breloc, lbkoff)) { if (ftl_reloc_iter_next(breloc, lbkoff)) {
return 1; return 1;
} }
@ -368,11 +368,11 @@ static int
ftl_reloc_iter_done(struct ftl_band_reloc *breloc) ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
{ {
size_t i; size_t i;
size_t num_chks = ftl_dev_num_punits(breloc->band->dev); size_t num_zones = ftl_dev_num_punits(breloc->band->dev);
size_t num_lbks = ftl_dev_lbks_in_chunk(breloc->parent->dev); size_t num_lbks = ftl_dev_lbks_in_zone(breloc->parent->dev);
for (i = 0; i < num_chks; ++i) { for (i = 0; i < num_zones; ++i) {
if (breloc->iter.chk_offset[i] != num_lbks) { if (breloc->iter.zone_offset[i] != num_lbks) {
return 0; return 0;
} }
} }
@ -409,7 +409,7 @@ ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_ppa *ppa)
for (i = 0; i < ftl_dev_num_punits(dev); ++i) { for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, ppa); lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, ppa);
ftl_reloc_iter_next_chk(breloc); ftl_reloc_iter_next_zone(breloc);
if (lbk_cnt || ftl_reloc_iter_done(breloc)) { if (lbk_cnt || ftl_reloc_iter_done(breloc)) {
break; break;
@ -617,9 +617,9 @@ ftl_band_reloc_init(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc,
return -1; return -1;
} }
breloc->iter.chk_offset = calloc(ftl_dev_num_punits(band->dev), breloc->iter.zone_offset = calloc(ftl_dev_num_punits(band->dev),
sizeof(*breloc->iter.chk_offset)); sizeof(*breloc->iter.zone_offset));
if (!breloc->iter.chk_offset) { if (!breloc->iter.zone_offset) {
SPDK_ERRLOG("Failed to initialize reloc iterator"); SPDK_ERRLOG("Failed to initialize reloc iterator");
return -1; return -1;
} }
@ -666,7 +666,7 @@ ftl_band_reloc_free(struct ftl_band_reloc *breloc)
spdk_ring_free(breloc->move_queue); spdk_ring_free(breloc->move_queue);
spdk_bit_array_free(&breloc->reloc_map); spdk_bit_array_free(&breloc->reloc_map);
free(breloc->iter.chk_offset); free(breloc->iter.zone_offset);
free(breloc->moves); free(breloc->moves);
} }

View File

@ -137,7 +137,7 @@ struct ftl_restore {
static int static int
ftl_restore_tail_md(struct ftl_restore_band *rband); ftl_restore_tail_md(struct ftl_restore_band *rband);
static void static void
ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status); ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status);
static void static void
ftl_restore_pad_band(struct ftl_restore_band *rband); ftl_restore_pad_band(struct ftl_restore_band *rband);
@ -341,7 +341,7 @@ ftl_restore_head_md(void *ctx)
lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE; lba_map->dma_buf = restore->md_buf + i * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE;
if (ftl_band_read_head_md(rband->band, ftl_restore_head_cb, rband)) { if (ftl_band_read_head_md(rband->band, ftl_restore_head_cb, rband)) {
if (spdk_likely(rband->band->num_chunks)) { if (spdk_likely(rband->band->num_zones)) {
SPDK_ERRLOG("Failed to read metadata on band %zu\n", i); SPDK_ERRLOG("Failed to read metadata on band %zu\n", i);
rband->md_status = FTL_MD_INVALID_CRC; rband->md_status = FTL_MD_INVALID_CRC;
@ -419,7 +419,7 @@ ftl_restore_next_band(struct ftl_restore *restore)
for (; restore->current < ftl_dev_num_bands(restore->dev); ++restore->current) { for (; restore->current < ftl_dev_num_bands(restore->dev); ++restore->current) {
rband = &restore->bands[restore->current]; rband = &restore->bands[restore->current];
if (spdk_likely(rband->band->num_chunks) && if (spdk_likely(rband->band->num_zones) &&
rband->md_status == FTL_MD_SUCCESS) { rband->md_status == FTL_MD_SUCCESS) {
restore->current++; restore->current++;
return rband; return rband;
@ -1068,11 +1068,11 @@ ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb)
} }
static bool static bool
ftl_pad_chunk_pad_finish(struct ftl_restore_band *rband, bool direct_access) ftl_pad_zone_pad_finish(struct ftl_restore_band *rband, bool direct_access)
{ {
struct ftl_restore *restore = rband->parent; struct ftl_restore *restore = rband->parent;
struct ftl_restore_band *next_band; struct ftl_restore_band *next_band;
size_t i, num_pad_chunks = 0; size_t i, num_pad_zones = 0;
if (spdk_unlikely(restore->pad_status && !restore->num_ios)) { if (spdk_unlikely(restore->pad_status && !restore->num_ios)) {
if (direct_access) { if (direct_access) {
@ -1086,14 +1086,14 @@ ftl_pad_chunk_pad_finish(struct ftl_restore_band *rband, bool direct_access)
return true; return true;
} }
for (i = 0; i < rband->band->num_chunks; ++i) { for (i = 0; i < rband->band->num_zones; ++i) {
if (rband->band->chunk_buf[i].state != FTL_CHUNK_STATE_CLOSED) { if (rband->band->zone_buf[i].state != SPDK_BDEV_ZONE_STATE_CLOSED) {
num_pad_chunks++; num_pad_zones++;
} }
} }
/* Finished all chunks in a band, check if all bands are done */ /* Finished all zones in a band, check if all bands are done */
if (num_pad_chunks == 0) { if (num_pad_zones == 0) {
if (direct_access) { if (direct_access) {
rband->band->state = FTL_BAND_STATE_CLOSED; rband->band->state = FTL_BAND_STATE_CLOSED;
ftl_band_set_direct_access(rband->band, false); ftl_band_set_direct_access(rband->band, false);
@ -1130,7 +1130,7 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
.flags = flags, .flags = flags,
.type = FTL_IO_WRITE, .type = FTL_IO_WRITE,
.lbk_cnt = dev->xfer_size, .lbk_cnt = dev->xfer_size,
.cb_fn = ftl_pad_chunk_cb, .cb_fn = ftl_pad_zone_cb,
.cb_ctx = rband, .cb_ctx = rband,
.data = buffer, .data = buffer,
.parent = NULL, .parent = NULL,
@ -1149,12 +1149,12 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
} }
static void static void
ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status) ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status)
{ {
struct ftl_restore_band *rband = arg; struct ftl_restore_band *rband = arg;
struct ftl_restore *restore = rband->parent; struct ftl_restore *restore = rband->parent;
struct ftl_band *band = io->band; struct ftl_band *band = io->band;
struct ftl_chunk *chunk; struct ftl_zone *zone;
struct ftl_io *new_io; struct ftl_io *new_io;
restore->num_ios--; restore->num_ios--;
@ -1165,8 +1165,8 @@ ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status)
} }
if (io->ppa.lbk + io->lbk_cnt == band->dev->geo.clba) { if (io->ppa.lbk + io->lbk_cnt == band->dev->geo.clba) {
chunk = ftl_band_chunk_from_ppa(band, io->ppa); zone = ftl_band_zone_from_ppa(band, io->ppa);
chunk->state = FTL_CHUNK_STATE_CLOSED; zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
} else { } else {
struct ftl_ppa ppa = io->ppa; struct ftl_ppa ppa = io->ppa;
ppa.lbk += io->lbk_cnt; ppa.lbk += io->lbk_cnt;
@ -1182,7 +1182,7 @@ ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status)
end: end:
spdk_dma_free(io->iov[0].iov_base); spdk_dma_free(io->iov[0].iov_base);
ftl_pad_chunk_pad_finish(rband, true); ftl_pad_zone_pad_finish(rband, true);
} }
static void static void
@ -1198,8 +1198,8 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
size_t i; size_t i;
int rc = 0; int rc = 0;
/* Check if some chunks are not closed */ /* Check if some zones are not closed */
if (ftl_pad_chunk_pad_finish(rband, false)) { if (ftl_pad_zone_pad_finish(rband, false)) {
/* /*
* If we're here, end meta wasn't recognized, but the whole band is written * If we're here, end meta wasn't recognized, but the whole band is written
* Assume the band was padded and ignore it * Assume the band was padded and ignore it
@ -1214,16 +1214,16 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
return; return;
} }
for (i = 0; i < band->num_chunks; ++i) { for (i = 0; i < band->num_zones; ++i) {
if (band->chunk_buf[i].state == FTL_CHUNK_STATE_CLOSED) { if (band->zone_buf[i].state == SPDK_BDEV_ZONE_STATE_CLOSED) {
continue; continue;
} }
rc = ftl_retrieve_chunk_info(dev, band->chunk_buf[i].start_ppa, &info, 1); rc = ftl_retrieve_chunk_info(dev, band->zone_buf[i].start_ppa, &info, 1);
if (spdk_unlikely(rc)) { if (spdk_unlikely(rc)) {
goto error; goto error;
} }
ppa = band->chunk_buf[i].start_ppa; ppa = band->zone_buf[i].start_ppa;
ppa.lbk = info.wp; ppa.lbk = info.wp;
buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * dev->xfer_size, 0, NULL); buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * dev->xfer_size, 0, NULL);
@ -1246,7 +1246,7 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
error: error:
restore->pad_status = rc; restore->pad_status = rc;
ftl_pad_chunk_pad_finish(rband, true); ftl_pad_zone_pad_finish(rband, true);
} }
static void static void

View File

@ -474,8 +474,8 @@ bdev_ftl_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
spdk_json_write_named_object_begin(w, "ftl"); spdk_json_write_named_object_begin(w, "ftl");
_bdev_ftl_write_config_info(ftl_bdev, w); _bdev_ftl_write_config_info(ftl_bdev, w);
spdk_json_write_named_string_fmt(w, "num_chunks", "%zu", attrs.num_chunks); spdk_json_write_named_string_fmt(w, "num_zones", "%zu", attrs.num_zones);
spdk_json_write_named_string_fmt(w, "chunk_size", "%zu", attrs.chunk_size); spdk_json_write_named_string_fmt(w, "zone_size", "%zu", attrs.zone_size);
/* ftl */ /* ftl */
spdk_json_write_object_end(w); spdk_json_write_object_end(w);

View File

@ -87,7 +87,7 @@ struct ftl_band *
test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id) test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
{ {
struct ftl_band *band; struct ftl_band *band;
struct ftl_chunk *chunk; struct ftl_zone *zone;
SPDK_CU_ASSERT_FATAL(dev != NULL); SPDK_CU_ASSERT_FATAL(dev != NULL);
SPDK_CU_ASSERT_FATAL(id < dev->geo.num_chk); SPDK_CU_ASSERT_FATAL(id < dev->geo.num_chk);
@ -98,26 +98,26 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
band->state = FTL_BAND_STATE_CLOSED; band->state = FTL_BAND_STATE_CLOSED;
LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry); LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
CIRCLEQ_INIT(&band->chunks); CIRCLEQ_INIT(&band->zones);
band->lba_map.vld = spdk_bit_array_create(ftl_num_band_lbks(dev)); band->lba_map.vld = spdk_bit_array_create(ftl_num_band_lbks(dev));
SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL); SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);
band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf)); band->zone_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->zone_buf));
SPDK_CU_ASSERT_FATAL(band->chunk_buf != NULL); SPDK_CU_ASSERT_FATAL(band->zone_buf != NULL);
band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev)); band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL); SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);
for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) { for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
chunk = &band->chunk_buf[i]; zone = &band->zone_buf[i];
chunk->pos = i; zone->pos = i;
chunk->state = FTL_CHUNK_STATE_CLOSED; zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
chunk->punit = &dev->punits[i]; zone->punit = &dev->punits[i];
chunk->start_ppa = dev->punits[i].start_ppa; zone->start_ppa = dev->punits[i].start_ppa;
chunk->start_ppa.chk = band->id; zone->start_ppa.chk = band->id;
CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq); CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
band->num_chunks++; band->num_zones++;
} }
pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE); pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);
@ -143,7 +143,7 @@ test_free_ftl_band(struct ftl_band *band)
SPDK_CU_ASSERT_FATAL(band != NULL); SPDK_CU_ASSERT_FATAL(band != NULL);
spdk_bit_array_free(&band->lba_map.vld); spdk_bit_array_free(&band->lba_map.vld);
spdk_bit_array_free(&band->reloc_bitmap); spdk_bit_array_free(&band->reloc_bitmap);
free(band->chunk_buf); free(band->zone_buf);
spdk_dma_free(band->lba_map.dma_buf); spdk_dma_free(band->lba_map.dma_buf);
} }
@ -157,5 +157,5 @@ test_offset_from_ppa(struct ftl_ppa ppa, struct ftl_band *band)
punit = ftl_ppa_flatten_punit(dev, ppa); punit = ftl_ppa_flatten_punit(dev, ppa);
CU_ASSERT_EQUAL(ppa.chk, band->id); CU_ASSERT_EQUAL(ppa.chk, band->id);
return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk; return punit * ftl_dev_lbks_in_zone(dev) + ppa.lbk;
} }

View File

@ -100,7 +100,7 @@ test_band_lbkoff_from_ppa_base(void)
ppa.chk = TEST_BAND_IDX; ppa.chk = TEST_BAND_IDX;
offset = ftl_band_lbkoff_from_ppa(g_band, ppa); offset = ftl_band_lbkoff_from_ppa(g_band, ppa);
CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_chunk(g_dev)); CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_zone(g_dev));
flat_lun++; flat_lun++;
} }
cleanup_band(); cleanup_band();
@ -234,7 +234,7 @@ test_next_xfer_ppa(void)
result = ftl_band_next_xfer_ppa(g_band, ppa, 1); result = ftl_band_next_xfer_ppa(g_band, ppa, 1);
CU_ASSERT_EQUAL(result.ppa, expect.ppa); CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping between chunks */ /* Verify jumping between zones */
expect = ppa_from_punit(g_range.begin + 1); expect = ppa_from_punit(g_range.begin + 1);
expect.chk = TEST_BAND_IDX; expect.chk = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size); result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);
@ -247,7 +247,7 @@ test_next_xfer_ppa(void)
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 3); result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 3);
CU_ASSERT_EQUAL(result.ppa, expect.ppa); CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping from last chunk to the first one */ /* Verify jumping from last zone to the first one */
expect = ppa_from_punit(g_range.begin); expect = ppa_from_punit(g_range.begin);
expect.chk = TEST_BAND_IDX; expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size; expect.lbk = g_dev->xfer_size;
@ -256,7 +256,7 @@ test_next_xfer_ppa(void)
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size); result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);
CU_ASSERT_EQUAL(result.ppa, expect.ppa); CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping from last chunk to the first one with unaligned offset */ /* Verify jumping from last zone to the first one with unaligned offset */
expect = ppa_from_punit(g_range.begin); expect = ppa_from_punit(g_range.begin);
expect.chk = TEST_BAND_IDX; expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size + 2; expect.lbk = g_dev->xfer_size + 2;
@ -276,10 +276,10 @@ test_next_xfer_ppa(void)
ftl_dev_num_punits(g_dev) + 3); ftl_dev_num_punits(g_dev) + 3);
CU_ASSERT_EQUAL(result.ppa, expect.ppa); CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Remove one chunk and verify it's skipped properly */ /* Remove one zone and verify it's skipped properly */
g_band->chunk_buf[1].state = FTL_CHUNK_STATE_BAD; g_band->zone_buf[1].state = SPDK_BDEV_ZONE_STATE_OFFLINE;
CIRCLEQ_REMOVE(&g_band->chunks, &g_band->chunk_buf[1], circleq); CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
g_band->num_chunks--; g_band->num_zones--;
expect = ppa_from_punit(g_range.begin + 2); expect = ppa_from_punit(g_range.begin + 2);
expect.chk = TEST_BAND_IDX; expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size * 5 + 4; expect.lbk = g_dev->xfer_size * 5 + 4;

View File

@ -116,9 +116,9 @@ ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
uint64_t punit; uint64_t punit;
punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin; punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev); ppa.lbk = lbkoff % ftl_dev_lbks_in_zone(dev);
ppa.chk = band->id; ppa.chk = band->id;
ppa.pu = punit / dev->geo.num_grp; ppa.pu = punit / dev->geo.num_grp;
ppa.grp = punit % dev->geo.num_grp; ppa.grp = punit % dev->geo.num_grp;
@ -283,7 +283,7 @@ test_reloc_iter_full(void)
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_num_band_lbks(dev)); CU_ASSERT_EQUAL(breloc->num_lbks, ftl_num_band_lbks(dev));
num_iters = ftl_dev_num_punits(dev) * num_iters = ftl_dev_num_punits(dev) *
(ftl_dev_lbks_in_chunk(dev) / reloc->xfer_size); (ftl_dev_lbks_in_zone(dev) / reloc->xfer_size);
for (i = 0; i < num_iters; i++) { for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &ppa); num_lbks = ftl_reloc_next_lbks(breloc, &ppa);
@ -293,10 +293,10 @@ test_reloc_iter_full(void)
num_iters = ftl_dev_num_punits(dev); num_iters = ftl_dev_num_punits(dev);
/* ftl_reloc_next_lbks is searching for maximum xfer_size */ /* ftl_reloc_next_lbks is searching for maximum xfer_size */
/* contiguous valid logic blocks in chunk, so we can end up */ /* contiguous valid logic blocks in zone, so we can end up */
/* with some reminder if number of logical blocks in chunk */ /* with some reminder if number of logical blocks in zone */
/* is not divisible by xfer_size */ /* is not divisible by xfer_size */
reminder = ftl_dev_lbks_in_chunk(dev) % reloc->xfer_size; reminder = ftl_dev_lbks_in_zone(dev) % reloc->xfer_size;
for (i = 0; i < num_iters; i++) { for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &ppa); num_lbks = ftl_reloc_next_lbks(breloc, &ppa);
CU_ASSERT_EQUAL(reminder, num_lbks); CU_ASSERT_EQUAL(reminder, num_lbks);
@ -412,7 +412,7 @@ test_reloc_scatter_band(void)
} }
static void static void
test_reloc_chunk(void) test_reloc_zone(void)
{ {
struct spdk_ftl_dev *dev; struct spdk_ftl_dev *dev;
struct ftl_reloc *reloc; struct ftl_reloc *reloc;
@ -428,26 +428,24 @@ test_reloc_chunk(void)
band->high_prio = 1; band->high_prio = 1;
ftl_band_alloc_lba_map(band); ftl_band_alloc_lba_map(band);
num_io = MAX_RELOC_QDEPTH * reloc->xfer_size; num_io = MAX_RELOC_QDEPTH * reloc->xfer_size;
num_iters = ftl_dev_lbks_in_chunk(dev) / num_io; num_iters = ftl_dev_lbks_in_zone(dev) / num_io;
set_band_valid_map(band, 0, ftl_num_band_lbks(dev)); set_band_valid_map(band, 0, ftl_num_band_lbks(dev));
ftl_reloc_add(reloc, band, ftl_dev_lbks_in_chunk(dev) * 3, ftl_reloc_add(reloc, band, ftl_dev_lbks_in_zone(dev) * 3,
ftl_dev_lbks_in_chunk(dev), 1, false); ftl_dev_lbks_in_zone(dev), 1, false);
ftl_reloc_prep(breloc);
add_to_active_queue(reloc, breloc); add_to_active_queue(reloc, breloc);
CU_ASSERT_EQUAL(breloc->num_lbks, ftl_dev_lbks_in_chunk(dev)); CU_ASSERT_EQUAL(breloc->num_lbks, ftl_dev_lbks_in_zone(dev));
for (i = 1; i <= num_iters ; ++i) { for (i = 1; i <= num_iters ; ++i) {
single_reloc_move(breloc); single_reloc_move(breloc);
num_lbk = ftl_dev_lbks_in_chunk(dev) - (i * num_io); num_lbk = ftl_dev_lbks_in_zone(dev) - (i * num_io);
CU_ASSERT_EQUAL(breloc->num_lbks, num_lbk); CU_ASSERT_EQUAL(breloc->num_lbks, num_lbk);
} }
/* In case num_lbks_in_chunk % num_io != 0 one extra iteration is needed */ /* In case num_lbks_in_zone % num_io != 0 one extra iteration is needed */
single_reloc_move(breloc); single_reloc_move(breloc);
/* Drain move queue */ /* Drain move queue */
ftl_reloc_process_moves(breloc); ftl_reloc_process_moves(breloc);
@ -518,8 +516,8 @@ main(int argc, char **argv)
test_reloc_full_band) == NULL test_reloc_full_band) == NULL
|| CU_add_test(suite, "test_reloc_scatter_band", || CU_add_test(suite, "test_reloc_scatter_band",
test_reloc_scatter_band) == NULL test_reloc_scatter_band) == NULL
|| CU_add_test(suite, "test_reloc_chunk", || CU_add_test(suite, "test_reloc_zone",
test_reloc_chunk) == NULL test_reloc_zone) == NULL
|| CU_add_test(suite, "test_reloc_single_lbk", || CU_add_test(suite, "test_reloc_single_lbk",
test_reloc_single_lbk) == NULL test_reloc_single_lbk) == NULL
) { ) {

View File

@ -147,7 +147,7 @@ test_wptr(void)
struct ftl_band *band; struct ftl_band *band;
struct ftl_io io = { 0 }; struct ftl_io io = { 0 };
size_t xfer_size; size_t xfer_size;
size_t chunk, lbk, offset, i; size_t zone, lbk, offset, i;
int rc; int rc;
setup_wptr_test(&dev, &g_geo, &g_range); setup_wptr_test(&dev, &g_geo, &g_range);
@ -162,8 +162,8 @@ test_wptr(void)
io.band = band; io.band = band;
io.dev = dev; io.dev = dev;
for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_chunk(dev) / xfer_size; ++lbk) { for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_zone(dev) / xfer_size; ++lbk) {
for (chunk = 0; chunk < band->num_chunks; ++chunk) { for (zone = 0; zone < band->num_zones; ++zone) {
CU_ASSERT_EQUAL(wptr->ppa.lbk, (lbk * xfer_size)); CU_ASSERT_EQUAL(wptr->ppa.lbk, (lbk * xfer_size));
CU_ASSERT_EQUAL(wptr->offset, offset); CU_ASSERT_EQUAL(wptr->offset, offset);
ftl_wptr_advance(wptr, xfer_size); ftl_wptr_advance(wptr, xfer_size);
@ -172,7 +172,7 @@ test_wptr(void)
} }
CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL); CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
CU_ASSERT_EQUAL(wptr->ppa.lbk, ftl_dev_lbks_in_chunk(dev)); CU_ASSERT_EQUAL(wptr->ppa.lbk, ftl_dev_lbks_in_zone(dev));
ftl_band_set_state(band, FTL_BAND_STATE_CLOSING); ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);