lib/ftl: Replace ftl_ppa struct with ftl_addr

FTL working on top of a zoned bdev no longer needs
the physical page address (PPA). The ftl_ppa struct
was replaced with ftl_addr, which represents the
zoned device addressing scheme.

Change-Id: Ied5750a7ab2f4ce42067ff3e69c1f26f85f5022a
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/467633
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Authored by Wojciech Malikowski on 2019-09-06 08:08:03 -04:00; committed by Tomasz Zawadzki
parent 748785c2a9
commit e47e16d3a4
23 changed files with 605 additions and 603 deletions
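For orientation, here is a minimal, self-contained sketch of the new addressing layout, mirroring the ftl_addr struct shown in the first hunk below; the struct name, main() helper, and printed value are illustrative only and are not part of the commit.

#include <inttypes.h>
#include <stdio.h>

/* Mirrors the layout from the ftl_addr.h hunk below: one 64-bit union that can
 * be read either as discrete zoned-device fields or as a single flat value
 * (e.g. for storing in the L2P table). */
struct ftl_addr_sketch {
	union {
		struct {
			uint64_t offset  : 32;	/* block offset within the zone */
			uint64_t zone_id : 16;	/* zone (band) index */
			uint64_t pu      : 15;	/* parallel unit */
			uint64_t rsvd    : 1;
		};
		uint64_t addr;			/* flat 64-bit view */
	};
};

int main(void)
{
	struct ftl_addr_sketch a = { .addr = 0 };

	a.offset = 128;		/* 128th block of ... */
	a.zone_id = 7;		/* ... zone 7 ... */
	a.pu = 3;		/* ... on parallel unit 3 */

	printf("flat view: 0x%016" PRIx64 "\n", a.addr);
	return 0;
}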


@@ -31,13 +31,13 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#ifndef FTL_PPA_H
-#define FTL_PPA_H
+#ifndef FTL_ADDR_H
+#define FTL_ADDR_H
 #include "spdk/stdinc.h"
-/* Marks PPA as invalid */
-#define FTL_PPA_INVALID (-1)
+/* Marks address as invalid */
+#define FTL_ADDR_INVALID (-1)
 /* Marks LBA as invalid */
 #define FTL_LBA_INVALID ((uint64_t)-1)
 /* Smallest data unit size */
@@ -50,13 +50,13 @@
 /* - packed version of the two formats above (can be only used when the */
 /* raw address can be represented in less than 32 bits) */
 /* Packed format is used, when possible, to avoid wasting RAM on the L2P table. */
-struct ftl_ppa {
+struct ftl_addr {
 union {
 struct {
 uint64_t offset : 32;
-uint64_t zone_id: 16;
+uint64_t zone_id : 16;
 uint64_t pu : 15;
 uint64_t rsvd : 1;
 };
 struct {
@@ -98,4 +98,4 @@ struct ftl_ppa_fmt {
 unsigned int grp_mask;
 };
-#endif /* FTL_PPA_H */
+#endif /* FTL_ADDR_H */
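The comment about the packed format in the hunk above is the motivation for keeping two representations: when the raw address fits in fewer than 32 bits, the L2P table can hold 4 bytes per logical block instead of 8. A rough, illustrative calculation (the device size and block size below are hypothetical, not taken from the commit):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical device: 1 TiB of user data in 4 KiB logical blocks. */
	uint64_t num_lbks = (1ULL << 40) / 4096;

	/* A full ftl_addr is 8 bytes per L2P entry, the packed form is 4 bytes. */
	printf("L2P with full addresses:   %" PRIu64 " MiB\n", num_lbks * 8 >> 20);
	printf("L2P with packed addresses: %" PRIu64 " MiB\n", num_lbks * 4 >> 20);
	return 0;
}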


@@ -118,7 +118,7 @@ ftl_anm_log_range(struct spdk_ocssd_chunk_notification_entry *log)
 }
 static struct ftl_anm_event *
-ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
+ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_addr addr,
 enum ftl_anm_range range, size_t num_lbks)
 {
 struct ftl_anm_event *event;
@@ -129,7 +129,7 @@ ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
 }
 event->dev = dev;
-event->ppa = ppa;
+event->addr = addr;
 switch (range) {
 case FTL_ANM_RANGE_LBK:
@@ -152,7 +152,7 @@ ftl_anm_process_log(struct ftl_anm_poller *poller,
 struct spdk_ocssd_chunk_notification_entry *log)
 {
 struct ftl_anm_event *event;
-struct ftl_ppa ppa = ftl_ppa_addr_unpack(poller->dev, log->lba);
+struct ftl_addr addr = ftl_addr_addr_unpack(poller->dev, log->lba);
 struct spdk_ftl_dev *dev = poller->dev;
 enum ftl_anm_range range = ftl_anm_log_range(log);
 int i, num_bands = 1;
@@ -160,19 +160,19 @@ ftl_anm_process_log(struct ftl_anm_poller *poller,
 num_bands = range != FTL_ANM_RANGE_PU ? 1 : ftl_dev_num_bands(dev);
 for (i = 0; i < num_bands; ++i) {
-struct ftl_zone *zone = ftl_band_zone_from_ppa(&dev->bands[i], ppa);
+struct ftl_zone *zone = ftl_band_zone_from_addr(&dev->bands[i], addr);
 if (zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE) {
 continue;
 }
-event = ftl_anm_event_alloc(dev, ppa, range, log->nlb);
+event = ftl_anm_event_alloc(dev, addr, range, log->nlb);
 if (!event) {
 return -ENOMEM;
 }
 poller->fn(event);
-ppa.zone_id++;
+addr.zone_id++;
 }
 return 0;
@@ -183,16 +183,16 @@ ftl_anm_in_poller_range(struct ftl_anm_poller *poller,
 struct spdk_ocssd_chunk_notification_entry *log)
 {
 struct spdk_ftl_dev *dev = poller->dev;
-struct ftl_ppa ppa = ftl_ppa_addr_unpack(dev, log->lba);
+struct ftl_addr addr = ftl_addr_addr_unpack(dev, log->lba);
 char buf[128];
-if (ppa.zone_id >= ftl_dev_num_bands(dev)) {
-SPDK_ERRLOG("ANM log contains invalid @ppa: %s\n",
-ftl_ppa2str(ppa, buf, sizeof(buf)));
+if (addr.zone_id >= ftl_dev_num_bands(dev)) {
+SPDK_ERRLOG("ANM log contains invalid @addr: %s\n",
+ftl_addr2str(addr, buf, sizeof(buf)));
 return false;
 }
-if (!ftl_ppa_in_range(dev, ppa)) {
+if (!ftl_addr_in_range(dev, addr)) {
 return false;
 }


@@ -35,7 +35,7 @@
 #define FTL_ANM_H
 #include "spdk/thread.h"
-#include "ftl_ppa.h"
+#include "ftl_addr.h"
 struct ftl_nvme_ctrlr;
 struct ftl_anm_event;
@@ -54,8 +54,8 @@ struct ftl_anm_event {
 /* Owner device */
 struct spdk_ftl_dev *dev;
-/* Start PPA */
-struct ftl_ppa ppa;
+/* First block address */
+struct ftl_addr addr;
 /* Number of logical blocks */
 size_t num_lbks;


@@ -382,10 +382,10 @@ ftl_unpack_head_md(struct ftl_band *band)
 return FTL_MD_SUCCESS;
 }
-struct ftl_ppa
-ftl_band_tail_md_ppa(struct ftl_band *band)
+struct ftl_addr
+ftl_band_tail_md_addr(struct ftl_band *band)
 {
-struct ftl_ppa ppa = {};
+struct ftl_addr addr = {};
 struct ftl_zone *zone;
 struct spdk_ftl_dev *dev = band->dev;
 size_t xfer_size = dev->xfer_size;
@@ -393,7 +393,7 @@ ftl_band_tail_md_ppa(struct ftl_band *band)
 size_t i;
 if (spdk_unlikely(!band->num_zones)) {
-return ftl_to_ppa(FTL_PPA_INVALID);
+return ftl_to_addr(FTL_ADDR_INVALID);
 }
 /* Metadata should be aligned to xfer size */
@@ -404,26 +404,26 @@ ftl_band_tail_md_ppa(struct ftl_band *band)
 zone = ftl_band_next_zone(band, zone);
 }
-ppa.offset = (num_req / band->num_zones) * xfer_size;
-ppa.zone_id = band->id;
-ppa.pu = zone->punit->start_ppa.pu;
-return ppa;
+addr.offset = (num_req / band->num_zones) * xfer_size;
+addr.zone_id = band->id;
+addr.pu = zone->punit->start_addr.pu;
+return addr;
 }
-struct ftl_ppa
-ftl_band_head_md_ppa(struct ftl_band *band)
+struct ftl_addr
+ftl_band_head_md_addr(struct ftl_band *band)
 {
-struct ftl_ppa ppa;
+struct ftl_addr addr;
 if (spdk_unlikely(!band->num_zones)) {
-return ftl_to_ppa(FTL_PPA_INVALID);
+return ftl_to_addr(FTL_ADDR_INVALID);
 }
-ppa = CIRCLEQ_FIRST(&band->zones)->punit->start_ppa;
-ppa.zone_id = band->id;
-return ppa;
+addr = CIRCLEQ_FIRST(&band->zones)->punit->start_addr;
+addr.zone_id = band->id;
+return addr;
 }
 void
@@ -455,14 +455,14 @@ ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
 }
 void
-ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
+ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_addr addr)
 {
 struct ftl_lba_map *lba_map = &band->lba_map;
 uint64_t offset;
 assert(lba != FTL_LBA_INVALID);
-offset = ftl_band_lbkoff_from_ppa(band, ppa);
+offset = ftl_band_lbkoff_from_addr(band, addr);
 pthread_spin_lock(&lba_map->lock);
 lba_map->num_vld++;
@@ -509,51 +509,51 @@ ftl_band_user_lbks(const struct ftl_band *band)
 }
 struct ftl_band *
-ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
+ftl_band_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr)
 {
-assert(ppa.zone_id < ftl_dev_num_bands(dev));
-return &dev->bands[ppa.zone_id];
+assert(addr.zone_id < ftl_dev_num_bands(dev));
+return &dev->bands[addr.zone_id];
 }
 struct ftl_zone *
-ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
+ftl_band_zone_from_addr(struct ftl_band *band, struct ftl_addr addr)
 {
 struct spdk_ftl_dev *dev = band->dev;
 unsigned int punit;
-punit = ftl_ppa_flatten_punit(dev, ppa);
+punit = ftl_addr_flatten_punit(dev, addr);
 assert(punit < ftl_dev_num_punits(dev));
 return &band->zone_buf[punit];
 }
 uint64_t
-ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
+ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr)
 {
 struct spdk_ftl_dev *dev = band->dev;
 unsigned int punit;
-punit = ftl_ppa_flatten_punit(dev, ppa);
-assert(ppa.zone_id == band->id);
-return punit * ftl_dev_lbks_in_zone(dev) + ppa.offset;
+punit = ftl_addr_flatten_punit(dev, addr);
+assert(addr.zone_id == band->id);
+return punit * ftl_dev_lbks_in_zone(dev) + addr.offset;
 }
-struct ftl_ppa
-ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
+struct ftl_addr
+ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr, size_t num_lbks)
 {
 struct spdk_ftl_dev *dev = band->dev;
 struct ftl_zone *zone;
 unsigned int punit_num;
 size_t num_xfers, num_stripes;
-assert(ppa.zone_id == band->id);
+assert(addr.zone_id == band->id);
-punit_num = ftl_ppa_flatten_punit(dev, ppa);
+punit_num = ftl_addr_flatten_punit(dev, addr);
 zone = &band->zone_buf[punit_num];
-num_lbks += (ppa.offset % dev->xfer_size);
-ppa.offset -= (ppa.offset % dev->xfer_size);
+num_lbks += (addr.offset % dev->xfer_size);
+addr.offset -= (addr.offset % dev->xfer_size);
 #if defined(DEBUG)
 /* Check that the number of zones has not been changed */
@@ -568,11 +568,11 @@ ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbk
 #endif
 assert(band->num_zones != 0);
 num_stripes = (num_lbks / dev->xfer_size) / band->num_zones;
-ppa.offset += num_stripes * dev->xfer_size;
+addr.offset += num_stripes * dev->xfer_size;
 num_lbks -= num_stripes * dev->xfer_size * band->num_zones;
-if (ppa.offset > ftl_dev_lbks_in_zone(dev)) {
-return ftl_to_ppa(FTL_PPA_INVALID);
+if (addr.offset > ftl_dev_lbks_in_zone(dev)) {
+return ftl_to_addr(FTL_ADDR_INVALID);
 }
 num_xfers = num_lbks / dev->xfer_size;
@@ -580,42 +580,42 @@ ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbk
 /* When the last zone is reached the lbk part of the address */
 /* needs to be increased by xfer_size */
 if (ftl_band_zone_is_last(band, zone)) {
-ppa.offset += dev->xfer_size;
-if (ppa.offset > ftl_dev_lbks_in_zone(dev)) {
-return ftl_to_ppa(FTL_PPA_INVALID);
+addr.offset += dev->xfer_size;
+if (addr.offset > ftl_dev_lbks_in_zone(dev)) {
+return ftl_to_addr(FTL_ADDR_INVALID);
 }
 }
 zone = ftl_band_next_operational_zone(band, zone);
 assert(zone);
-ppa.pu = zone->start_ppa.pu;
+addr.pu = zone->start_addr.pu;
 num_lbks -= dev->xfer_size;
 }
 if (num_lbks) {
-ppa.offset += num_lbks;
-if (ppa.offset > ftl_dev_lbks_in_zone(dev)) {
-return ftl_to_ppa(FTL_PPA_INVALID);
+addr.offset += num_lbks;
+if (addr.offset > ftl_dev_lbks_in_zone(dev)) {
+return ftl_to_addr(FTL_ADDR_INVALID);
 }
 }
-return ppa;
+return addr;
 }
 static size_t
-ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
+ftl_xfer_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
 {
 struct ftl_zone *zone, *current_zone;
 unsigned int punit_offset = 0;
 size_t off, num_stripes, xfer_size = band->dev->xfer_size;
-assert(ppa.zone_id == band->id);
-num_stripes = (ppa.offset / xfer_size) * band->num_zones;
-off = ppa.offset % xfer_size;
-current_zone = ftl_band_zone_from_ppa(band, ppa);
+assert(addr.zone_id == band->id);
+num_stripes = (addr.offset / xfer_size) * band->num_zones;
+off = addr.offset % xfer_size;
+current_zone = ftl_band_zone_from_addr(band, addr);
 CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
 if (current_zone == zone) {
 break;
@@ -626,27 +626,27 @@ ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
 return xfer_size * (num_stripes + punit_offset) + off;
 }
-struct ftl_ppa
-ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
+struct ftl_addr
+ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
 {
-struct ftl_ppa ppa = { .addr = 0 };
+struct ftl_addr addr = { .addr = 0 };
 struct spdk_ftl_dev *dev = band->dev;
 uint64_t punit;
 punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
-ppa.offset = lbkoff % ftl_dev_lbks_in_zone(dev);
-ppa.zone_id = band->id;
-ppa.pu = punit;
-return ppa;
+addr.offset = lbkoff % ftl_dev_lbks_in_zone(dev);
+addr.zone_id = band->id;
+addr.pu = punit;
+return addr;
 }
-struct ftl_ppa
-ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
+struct ftl_addr
+ftl_band_next_addr(struct ftl_band *band, struct ftl_addr addr, size_t offset)
 {
-uint64_t lbkoff = ftl_band_lbkoff_from_ppa(band, ppa);
-return ftl_band_ppa_from_lbkoff(band, lbkoff + offset);
+uint64_t lbkoff = ftl_band_lbkoff_from_addr(band, addr);
+return ftl_band_addr_from_lbkoff(band, lbkoff + offset);
 }
 void
@@ -711,7 +711,7 @@ ftl_read_md_cb(struct ftl_io *io, void *arg, int status)
 }
 static struct ftl_md_io *
-ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
+ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_addr addr,
 struct ftl_band *band, size_t lbk_cnt, void *buf,
 ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
 {
@@ -722,7 +722,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
 .rwb_batch = NULL,
 .band = band,
 .size = sizeof(*io),
-.flags = FTL_IO_MD | FTL_IO_PPA_MODE,
+.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
 .type = FTL_IO_READ,
 .lbk_cnt = lbk_cnt,
 .cb_fn = fn,
@@ -734,7 +734,7 @@ ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
 return NULL;
 }
-io->io.ppa = ppa;
+io->io.addr = addr;
 io->pack_fn = pack_fn;
 io->cb_fn = cb_fn;
 io->cb_ctx = cb_ctx;
@@ -752,7 +752,7 @@ ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
 .rwb_batch = NULL,
 .band = band,
 .size = sizeof(struct ftl_io),
-.flags = FTL_IO_MD | FTL_IO_PPA_MODE,
+.flags = FTL_IO_MD | FTL_IO_PHYSICAL_MODE,
 .type = FTL_IO_WRITE,
 .lbk_cnt = lbk_cnt,
 .cb_fn = cb,
@@ -804,17 +804,17 @@ ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb)
 ftl_pack_tail_md, cb);
 }
-static struct ftl_ppa
-ftl_band_lba_map_ppa(struct ftl_band *band, size_t offset)
+static struct ftl_addr
+ftl_band_lba_map_addr(struct ftl_band *band, size_t offset)
 {
-return ftl_band_next_xfer_ppa(band, band->tail_md_ppa,
+return ftl_band_next_xfer_addr(band, band->tail_md_addr,
 ftl_tail_md_hdr_num_lbks() +
 ftl_vld_map_num_lbks(band->dev) +
 offset);
 }
 static int
-ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa,
+ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_addr start_addr,
 void *buf, ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
 {
 struct spdk_ftl_dev *dev = band->dev;
@@ -824,7 +824,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
 return -ENOENT;
 }
-io = ftl_io_init_md_read(dev, start_ppa, band, lbk_cnt, buf, fn, pack_fn, cb_fn, cb_ctx);
+io = ftl_io_init_md_read(dev, start_addr, band, lbk_cnt, buf, fn, pack_fn, cb_fn, cb_ctx);
 if (!io) {
 return -ENOMEM;
 }
@@ -834,9 +834,9 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
 }
 int
-ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa ppa, ftl_io_fn cb_fn, void *cb_ctx)
+ftl_band_read_tail_md(struct ftl_band *band, struct ftl_addr addr, ftl_io_fn cb_fn, void *cb_ctx)
 {
-return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), ppa, band->lba_map.dma_buf,
+return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), addr, band->lba_map.dma_buf,
 ftl_read_md_cb, ftl_unpack_tail_md, cb_fn, cb_ctx);
 }
@@ -895,12 +895,12 @@ ftl_process_lba_map_requests(struct spdk_ftl_dev *dev, struct ftl_lba_map *lba_m
 }
 static size_t
-ftl_lba_map_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
+ftl_lba_map_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
 {
 size_t offset;
-struct ftl_ppa start_ppa = ftl_band_lba_map_ppa(band, 0);
+struct ftl_addr start_addr = ftl_band_lba_map_addr(band, 0);
-offset = ftl_xfer_offset_from_ppa(band, ppa) - ftl_xfer_offset_from_ppa(band, start_ppa);
+offset = ftl_xfer_offset_from_addr(band, addr) - ftl_xfer_offset_from_addr(band, start_addr);
 assert(offset < ftl_lba_map_num_lbks(band->dev));
 return offset;
@@ -912,7 +912,7 @@ ftl_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
 struct ftl_lba_map *lba_map = &io->band->lba_map;
 uint64_t lbk_off;
-lbk_off = ftl_lba_map_offset_from_ppa(io->band, io->ppa);
+lbk_off = ftl_lba_map_offset_from_addr(io->band, io->addr);
 assert(lbk_off + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));
 if (!status) {
@@ -996,7 +996,7 @@ ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
 ftl_lba_map_set_segment_state(lba_map, lbk_off, num_read,
 FTL_LBA_MAP_SEG_PENDING);
-rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_ppa(band, lbk_off),
+rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_addr(band, lbk_off),
 (char *)band->lba_map.map + lbk_off * FTL_BLOCK_SIZE,
 ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
 if (rc) {
@@ -1024,7 +1024,7 @@ ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
 {
 return ftl_band_read_md(band,
 ftl_head_md_num_lbks(band->dev),
-ftl_band_head_md_ppa(band),
+ftl_band_head_md_addr(band),
 band->lba_map.dma_buf,
 ftl_read_md_cb,
 ftl_unpack_head_md,
@@ -1046,13 +1046,13 @@ ftl_erase_fail(struct ftl_io *io, int status)
 struct ftl_band *band = io->band;
 char buf[128];
-SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
-ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);
-zone = ftl_band_zone_from_ppa(band, io->ppa);
+SPDK_ERRLOG("Erase failed @addr: %s, status: %d\n",
+ftl_addr2str(io->addr, buf, sizeof(buf)), status);
+zone = ftl_band_zone_from_addr(band, io->addr);
 zone->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
 ftl_band_remove_zone(band, zone);
-band->tail_md_ppa = ftl_band_tail_md_ppa(band);
+band->tail_md_addr = ftl_band_tail_md_addr(band);
 }
 static void
@@ -1064,7 +1064,7 @@ ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status)
 ftl_erase_fail(io, status);
 return;
 }
-zone = ftl_band_zone_from_ppa(io->band, io->ppa);
+zone = ftl_band_zone_from_addr(io->band, io->addr);
 zone->state = SPDK_BDEV_ZONE_STATE_EMPTY;
 zone->write_offset = 0;
 }
@@ -1092,7 +1092,7 @@ ftl_band_erase(struct ftl_band *band)
 break;
 }
-io->ppa = zone->start_ppa;
+io->addr = zone->start_addr;
 rc = ftl_io_erase(io);
 if (rc) {
 assert(0);
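The ftl_band_lbkoff_from_addr() and ftl_band_addr_from_lbkoff() helpers renamed above are inverses of each other: a band is striped across parallel units, one zone per unit, so a block offset within the band is the flattened punit index times the zone size plus the in-zone offset. A self-contained sketch of that mapping (the zone size is hypothetical, and dev->range.begin / punit flattening is left out for brevity):

#include <assert.h>
#include <stdint.h>

#define LBKS_IN_ZONE 4096u	/* hypothetical ftl_dev_lbks_in_zone() value */

/* Forward mapping, after ftl_band_lbkoff_from_addr(). */
static uint64_t band_lbkoff(uint64_t punit, uint64_t offset)
{
	return punit * LBKS_IN_ZONE + offset;
}

/* Reverse mapping, after ftl_band_addr_from_lbkoff(). */
static void band_addr(uint64_t lbkoff, uint64_t *punit, uint64_t *offset)
{
	*punit = lbkoff / LBKS_IN_ZONE;
	*offset = lbkoff % LBKS_IN_ZONE;
}

int main(void)
{
	uint64_t punit, offset;

	band_addr(band_lbkoff(3, 100), &punit, &offset);
	assert(punit == 3 && offset == 100);
	return 0;
}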


@@ -40,7 +40,7 @@
 #include "spdk/bdev_zone.h"
 #include "ftl_io.h"
-#include "ftl_ppa.h"
+#include "ftl_addr.h"
 #include "ftl_io.h"
 /* Number of LBAs that could be stored in a single block */
@@ -59,8 +59,8 @@ struct ftl_zone {
 /* Current logical block's offset */
 uint64_t write_offset;
-/* First PPA */
-struct ftl_ppa start_ppa;
+/* First logical block of a zone */
+struct ftl_addr start_addr;
 /* Pointer to parallel unit */
 struct ftl_punit *punit;
@@ -178,8 +178,8 @@ struct ftl_band {
 /* Number of defrag cycles */
 uint64_t wr_cnt;
-/* End metadata start ppa */
-struct ftl_ppa tail_md_ppa;
+/* End metadata start addr */
+struct ftl_addr tail_md_addr;
 /* Bitmap of all bands that have its data moved onto this band */
 struct spdk_bit_array *reloc_bitmap;
@@ -195,8 +195,8 @@ struct ftl_band {
 STAILQ_ENTRY(ftl_band) prio_stailq;
 };
-uint64_t ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa);
-struct ftl_ppa ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff);
+uint64_t ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr);
+struct ftl_addr ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff);
 void ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state);
 size_t ftl_band_age(const struct ftl_band *band);
 void ftl_band_acquire_lba_map(struct ftl_band *band);
@@ -206,25 +206,25 @@ void ftl_band_release_lba_map(struct ftl_band *band);
 int ftl_band_read_lba_map(struct ftl_band *band,
 size_t offset, size_t lba_cnt,
 ftl_io_fn cb_fn, void *cb_ctx);
-struct ftl_ppa ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa,
+struct ftl_addr ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr,
 size_t num_lbks);
-struct ftl_ppa ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa,
+struct ftl_addr ftl_band_next_addr(struct ftl_band *band, struct ftl_addr addr,
 size_t offset);
 size_t ftl_band_num_usable_lbks(const struct ftl_band *band);
 size_t ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset);
 size_t ftl_band_user_lbks(const struct ftl_band *band);
 void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
-struct ftl_ppa ppa);
+struct ftl_addr addr);
-struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
-struct ftl_zone *ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa);
+struct ftl_band *ftl_band_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
+struct ftl_zone *ftl_band_zone_from_addr(struct ftl_band *band, struct ftl_addr);
 void ftl_band_md_clear(struct ftl_band *band);
-int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa,
+int ftl_band_read_tail_md(struct ftl_band *band, struct ftl_addr,
 ftl_io_fn cb_fn, void *cb_ctx);
 int ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx);
 int ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb);
 int ftl_band_write_head_md(struct ftl_band *band, ftl_io_fn cb);
-struct ftl_ppa ftl_band_tail_md_ppa(struct ftl_band *band);
-struct ftl_ppa ftl_band_head_md_ppa(struct ftl_band *band);
+struct ftl_addr ftl_band_tail_md_addr(struct ftl_band *band);
+struct ftl_addr ftl_band_head_md_addr(struct ftl_band *band);
 void ftl_band_write_failed(struct ftl_band *band);
 int ftl_band_full(struct ftl_band *band, size_t offset);
 int ftl_band_erase(struct ftl_band *band);


@@ -65,8 +65,8 @@ struct ftl_wptr {
 /* Owner device */
 struct spdk_ftl_dev *dev;
-/* Current PPA */
-struct ftl_ppa ppa;
+/* Current address */
+struct ftl_addr addr;
 /* Band currently being written to */
 struct ftl_band *band;
@@ -85,7 +85,8 @@ struct ftl_wptr {
 /*
 * If setup in direct mode, there will be no offset or band state update after IO.
-* The PPA is not assigned by wptr, and is instead taken directly from the request.
+* The zoned bdev address is not assigned by wptr, and is instead taken directly
+* from the request.
 */
 bool direct_mode;
@@ -224,8 +225,8 @@ ftl_md_write_fail(struct ftl_io *io, int status)
 wptr = ftl_wptr_from_band(band);
 assert(wptr);
-SPDK_ERRLOG("Metadata write failed @ppa: %s, status: %d\n",
-ftl_ppa2str(wptr->ppa, buf, sizeof(buf)), status);
+SPDK_ERRLOG("Metadata write failed @addr: %s, status: %d\n",
+ftl_addr2str(wptr->addr, buf, sizeof(buf)), status);
 ftl_halt_writes(io->dev, band);
 }
@@ -278,28 +279,29 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
 }
 static int
-ftl_ppa_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
+ftl_read_next_physical_addr(struct ftl_io *io, struct ftl_addr *addr)
 {
 struct spdk_ftl_dev *dev = io->dev;
 size_t lbk_cnt, max_lbks;
-assert(ftl_io_mode_ppa(io));
+assert(ftl_io_mode_physical(io));
 assert(io->iov_pos < io->iov_cnt);
 if (io->pos == 0) {
-*ppa = io->ppa;
+*addr = io->addr;
 } else {
-*ppa = ftl_band_next_xfer_ppa(io->band, io->ppa, io->pos);
+*addr = ftl_band_next_xfer_addr(io->band, io->addr, io->pos);
 }
-assert(!ftl_ppa_invalid(*ppa));
+assert(!ftl_addr_invalid(*addr));
 /* Metadata has to be read in the way it's written (jumping across */
 /* the zones in xfer_size increments) */
 if (io->flags & FTL_IO_MD) {
-max_lbks = dev->xfer_size - (ppa->offset % dev->xfer_size);
+max_lbks = dev->xfer_size - (addr->offset % dev->xfer_size);
 lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);
-assert(ppa->offset / dev->xfer_size == (ppa->offset + lbk_cnt - 1) / dev->xfer_size);
+assert(addr->offset / dev->xfer_size ==
+(addr->offset + lbk_cnt - 1) / dev->xfer_size);
 } else {
 lbk_cnt = ftl_io_iovec_len_left(io);
 }
@@ -338,25 +340,25 @@ ftl_submit_erase(struct ftl_io *io)
 {
 struct spdk_ftl_dev *dev = io->dev;
 struct ftl_band *band = io->band;
-struct ftl_ppa ppa = io->ppa;
+struct ftl_addr addr = io->addr;
 struct ftl_zone *zone;
-uint64_t ppa_packed;
+uint64_t addr_packed;
 int rc = 0;
 size_t i;
 for (i = 0; i < io->lbk_cnt; ++i) {
 if (i != 0) {
-zone = ftl_band_next_zone(band, ftl_band_zone_from_ppa(band, ppa));
+zone = ftl_band_next_zone(band, ftl_band_zone_from_addr(band, addr));
 assert(zone->state == SPDK_BDEV_ZONE_STATE_CLOSED);
-ppa = zone->start_ppa;
+addr = zone->start_addr;
 }
-assert(ppa.offset == 0);
-ppa_packed = ftl_ppa_addr_pack(dev, ppa);
-ftl_trace_submission(dev, io, ppa, 1);
+assert(addr.offset == 0);
+addr_packed = ftl_addr_addr_pack(dev, addr);
+ftl_trace_submission(dev, io, addr, 1);
 rc = spdk_nvme_ocssd_ns_cmd_vector_reset(dev->ns, ftl_get_write_qpair(dev),
-&ppa_packed, 1, NULL, ftl_io_cmpl_cb, io);
+&addr_packed, 1, NULL, ftl_io_cmpl_cb, io);
 if (spdk_unlikely(rc)) {
 ftl_io_fail(io, rc);
 SPDK_ERRLOG("Vector reset failed with status: %d\n", rc);
@@ -474,7 +476,7 @@ ftl_wptr_init(struct ftl_band *band)
 wptr->dev = dev;
 wptr->band = band;
 wptr->zone = CIRCLEQ_FIRST(&band->zones);
-wptr->ppa = wptr->zone->start_ppa;
+wptr->addr = wptr->zone->start_addr;
 TAILQ_INIT(&wptr->pending_queue);
 return wptr;
@@ -580,13 +582,13 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
 }
 wptr->zone->busy = true;
-wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size);
+wptr->addr = ftl_band_next_xfer_addr(band, wptr->addr, xfer_size);
 wptr->zone = ftl_band_next_operational_zone(band, wptr->zone);
-assert(!ftl_ppa_invalid(wptr->ppa));
+assert(!ftl_addr_invalid(wptr->addr));
 SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: pu:%d zone:%d, lbk:%u\n",
-wptr->ppa.pu, wptr->ppa.zone_id, wptr->ppa.offset);
+wptr->addr.pu, wptr->addr.zone_id, wptr->addr.offset);
 if (wptr->offset >= next_thld && !dev->next_band) {
 dev->next_band = ftl_next_write_band(dev);
@@ -681,15 +683,15 @@ ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
 static bool
 ftl_cache_lba_valid(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
 {
-struct ftl_ppa ppa;
+struct ftl_addr addr;
 /* If the LBA is invalid don't bother checking the md and l2p */
 if (spdk_unlikely(entry->lba == FTL_LBA_INVALID)) {
 return false;
 }
-ppa = ftl_l2p_get(dev, entry->lba);
-if (!(ftl_ppa_cached(ppa) && ppa.cache_offset == entry->pos)) {
+addr = ftl_l2p_get(dev, entry->lba);
+if (!(ftl_addr_cached(addr) && addr.cache_offset == entry->pos)) {
 return false;
 }
@@ -706,13 +708,13 @@ ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_rwb_entry *entry)
 }
 /* If the l2p wasn't updated and still points at the entry, fill it with the */
-/* on-disk PPA and clear the cache status bit. Otherwise, skip the l2p update */
+/* on-disk address and clear the cache status bit. Otherwise, skip the l2p update */
 /* and just clear the cache status. */
 if (!ftl_cache_lba_valid(dev, entry)) {
 goto clear;
 }
-ftl_l2p_set(dev, entry->lba, entry->ppa);
+ftl_l2p_set(dev, entry->lba, entry->addr);
 clear:
 ftl_rwb_entry_invalidate(entry);
 unlock:
@@ -748,7 +750,7 @@ ftl_rwb_pad(struct spdk_ftl_dev *dev, size_t size)
 }
 entry->lba = FTL_LBA_INVALID;
-entry->ppa = ftl_to_ppa(FTL_PPA_INVALID);
+entry->addr = ftl_to_addr(FTL_ADDR_INVALID);
 memset(entry->data, 0, FTL_BLOCK_SIZE);
 ftl_rwb_push(entry);
 }
@@ -842,13 +844,13 @@ apply:
 }
 static int
-ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
+ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_addr addr)
 {
-struct ftl_band *band = ftl_band_from_ppa(dev, ppa);
+struct ftl_band *band = ftl_band_from_addr(dev, addr);
 struct ftl_lba_map *lba_map = &band->lba_map;
 uint64_t offset;
-offset = ftl_band_lbkoff_from_ppa(band, ppa);
+offset = ftl_band_lbkoff_from_addr(band, addr);
 /* The bit might be already cleared if two writes are scheduled to the */
 /* same LBA at the same time */
@@ -863,16 +865,16 @@ ftl_invalidate_addr_unlocked(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
 }
 int
-ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
+ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr)
 {
 struct ftl_band *band;
 int rc;
-assert(!ftl_ppa_cached(ppa));
-band = ftl_band_from_ppa(dev, ppa);
+assert(!ftl_addr_cached(addr));
+band = ftl_band_from_addr(dev, addr);
 pthread_spin_lock(&band->lba_map.lock);
-rc = ftl_invalidate_addr_unlocked(dev, ppa);
+rc = ftl_invalidate_addr_unlocked(dev, addr);
 pthread_spin_unlock(&band->lba_map.lock);
 return rc;
@@ -900,19 +902,19 @@ ftl_add_to_retry_queue(struct ftl_io *io)
 }
 static int
-ftl_ppa_cache_read(struct ftl_io *io, uint64_t lba,
-struct ftl_ppa ppa, void *buf)
+ftl_cache_read(struct ftl_io *io, uint64_t lba,
+struct ftl_addr addr, void *buf)
 {
 struct ftl_rwb *rwb = io->dev->rwb;
 struct ftl_rwb_entry *entry;
-struct ftl_ppa nppa;
+struct ftl_addr naddr;
 int rc = 0;
-entry = ftl_rwb_entry_from_offset(rwb, ppa.cache_offset);
+entry = ftl_rwb_entry_from_offset(rwb, addr.cache_offset);
 pthread_spin_lock(&entry->lock);
-nppa = ftl_l2p_get(io->dev, lba);
-if (ppa.addr != nppa.addr) {
+naddr = ftl_l2p_get(io->dev, lba);
+if (addr.addr != naddr.addr) {
 rc = -1;
 goto out;
 }
@@ -924,24 +926,24 @@ out:
 }
 static int
-ftl_lba_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
+ftl_read_next_logical_addr(struct ftl_io *io, struct ftl_addr *addr)
 {
 struct spdk_ftl_dev *dev = io->dev;
-struct ftl_ppa next_ppa;
+struct ftl_addr next_addr;
 size_t i;
-*ppa = ftl_l2p_get(dev, ftl_io_current_lba(io));
-SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Read ppa:%lx, lba:%lu\n",
-ppa->addr, ftl_io_current_lba(io));
-/* If the PPA is invalid, skip it (the buffer should already be zero'ed) */
-if (ftl_ppa_invalid(*ppa)) {
+*addr = ftl_l2p_get(dev, ftl_io_current_lba(io));
+SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Read addr:%lx, lba:%lu\n",
+addr->addr, ftl_io_current_lba(io));
+/* If the address is invalid, skip it (the buffer should already be zero'ed) */
+if (ftl_addr_invalid(*addr)) {
 return -EFAULT;
 }
-if (ftl_ppa_cached(*ppa)) {
-if (!ftl_ppa_cache_read(io, ftl_io_current_lba(io), *ppa, ftl_io_iovec_addr(io))) {
+if (ftl_addr_cached(*addr)) {
+if (!ftl_cache_read(io, ftl_io_current_lba(io), *addr, ftl_io_iovec_addr(io))) {
 return 0;
 }
@@ -950,13 +952,13 @@ ftl_lba_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
 }
 for (i = 1; i < ftl_io_iovec_len_left(io); ++i) {
-next_ppa = ftl_l2p_get(dev, ftl_io_get_lba(io, io->pos + i));
-if (ftl_ppa_invalid(next_ppa) || ftl_ppa_cached(next_ppa)) {
+next_addr = ftl_l2p_get(dev, ftl_io_get_lba(io, io->pos + i));
+if (ftl_addr_invalid(next_addr) || ftl_addr_cached(next_addr)) {
 break;
 }
-if (ftl_ppa_addr_pack(dev, *ppa) + i != ftl_ppa_addr_pack(dev, next_ppa)) {
+if (ftl_addr_addr_pack(dev, *addr) + i != ftl_addr_addr_pack(dev, next_addr)) {
 break;
 }
 }
@@ -968,16 +970,16 @@ static int
 ftl_submit_read(struct ftl_io *io)
 {
 struct spdk_ftl_dev *dev = io->dev;
-struct ftl_ppa ppa;
+struct ftl_addr addr;
 int rc = 0, lbk_cnt;
 assert(LIST_EMPTY(&io->children));
 while (io->pos < io->lbk_cnt) {
-if (ftl_io_mode_ppa(io)) {
-lbk_cnt = rc = ftl_ppa_read_next_ppa(io, &ppa);
+if (ftl_io_mode_physical(io)) {
+lbk_cnt = rc = ftl_read_next_physical_addr(io, &addr);
 } else {
-lbk_cnt = rc = ftl_lba_read_next_ppa(io, &ppa);
+lbk_cnt = rc = ftl_read_next_logical_addr(io, &addr);
 }
 /* We might need to retry the read from scratch (e.g. */
@@ -998,10 +1000,10 @@ ftl_submit_read(struct ftl_io *io)
 assert(lbk_cnt > 0);
-ftl_trace_submission(dev, io, ppa, lbk_cnt);
+ftl_trace_submission(dev, io, addr, lbk_cnt);
 rc = spdk_nvme_ns_cmd_read(dev->ns, ftl_get_read_qpair(dev),
 ftl_io_iovec_addr(io),
-ftl_ppa_addr_pack(io->dev, ppa), lbk_cnt,
+ftl_addr_addr_pack(io->dev, addr), lbk_cnt,
 ftl_io_cmpl_cb, io, 0);
 if (spdk_unlikely(rc)) {
 if (rc == -ENOMEM) {
@@ -1148,7 +1150,7 @@ ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
 if (spdk_unlikely(!success)) {
-SPDK_ERRLOG("Non-volatile cache write failed at %"PRIx64"\n", io->ppa.addr);
+SPDK_ERRLOG("Non-volatile cache write failed at %"PRIx64"\n", io->addr.addr);
 io->status = -EIO;
 }
@@ -1175,14 +1177,14 @@ ftl_submit_nv_cache(void *ctx)
 thread = spdk_io_channel_get_thread(io->ioch);
 rc = spdk_bdev_write_blocks_with_md(nv_cache->bdev_desc, ioch->cache_ioch,
-ftl_io_iovec_addr(io), io->md, io->ppa.addr,
+ftl_io_iovec_addr(io), io->md, io->addr.addr,
 io->lbk_cnt, ftl_nv_cache_submit_cb, io);
 if (rc == -ENOMEM) {
 spdk_thread_send_msg(thread, ftl_submit_nv_cache, io);
 return;
 } else if (rc) {
 SPDK_ERRLOG("Write to persistent cache failed: %s (%"PRIu64", %"PRIu64")\n",
-spdk_strerror(-rc), io->ppa.addr, io->lbk_cnt);
+spdk_strerror(-rc), io->addr.addr, io->lbk_cnt);
 spdk_mempool_put(nv_cache->md_pool, io->md);
 io->status = -EIO;
 ftl_io_complete(io);
@@ -1238,8 +1240,8 @@ _ftl_write_nv_cache(void *ctx)
 }
 /* Reserve area on the write buffer cache */
-child->ppa.addr = ftl_reserve_nv_cache(&dev->nv_cache, &num_lbks, &phase);
-if (child->ppa.addr == FTL_LBA_INVALID) {
+child->addr.addr = ftl_reserve_nv_cache(&dev->nv_cache, &num_lbks, &phase);
+if (child->addr.addr == FTL_LBA_INVALID) {
 spdk_mempool_put(dev->nv_cache.md_pool, child->md);
 ftl_io_free(child);
 spdk_thread_send_msg(thread, _ftl_write_nv_cache, io);
@@ -1319,16 +1321,16 @@ ftl_write_fail(struct ftl_io *io, int status)
 entry = ftl_rwb_batch_first_entry(batch);
-band = ftl_band_from_ppa(io->dev, entry->ppa);
-SPDK_ERRLOG("Write failed @ppa: %s, status: %d\n",
-ftl_ppa2str(entry->ppa, buf, sizeof(buf)), status);
+band = ftl_band_from_addr(io->dev, entry->addr);
+SPDK_ERRLOG("Write failed @addr: %s, status: %d\n",
+ftl_addr2str(entry->addr, buf, sizeof(buf)), status);
 /* Close the band and, halt wptr and defrag */
 ftl_halt_writes(dev, band);
 ftl_rwb_foreach(entry, batch) {
 /* Invalidate meta set by process_writes() */
-ftl_invalidate_addr(dev, entry->ppa);
+ftl_invalidate_addr(dev, entry->addr);
 }
 /* Reset the batch back to the the RWB to resend it later */
@@ -1361,8 +1363,8 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
 band->num_reloc_blocks--;
 }
-SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lu, lba:%lu\n",
-entry->ppa.addr, entry->lba);
+SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write addr:%lu, lba:%lu\n",
+entry->addr.addr, entry->lba);
 }
 ftl_process_flush(dev, batch);
@@ -1380,70 +1382,70 @@ ftl_update_rwb_stats(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry
 static void
 ftl_update_l2p(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry,
-struct ftl_ppa ppa)
+struct ftl_addr addr)
 {
-struct ftl_ppa prev_ppa;
+struct ftl_addr prev_addr;
 struct ftl_rwb_entry *prev;
 struct ftl_band *band;
 int valid;
-prev_ppa = ftl_l2p_get(dev, entry->lba);
-if (ftl_ppa_invalid(prev_ppa)) {
-ftl_l2p_set(dev, entry->lba, ppa);
+prev_addr = ftl_l2p_get(dev, entry->lba);
+if (ftl_addr_invalid(prev_addr)) {
+ftl_l2p_set(dev, entry->lba, addr);
 return;
 }
-/* If the L2P's PPA is different than what we expected we don't need to */
+/* If the L2P's physical address is different than what we expected we don't need to */
 /* do anything (someone's already overwritten our data). */
-if (ftl_rwb_entry_weak(entry) && !ftl_ppa_cmp(prev_ppa, entry->ppa)) {
+if (ftl_rwb_entry_weak(entry) && !ftl_addr_cmp(prev_addr, entry->addr)) {
 return;
 }
-if (ftl_ppa_cached(prev_ppa)) {
+if (ftl_addr_cached(prev_addr)) {
 assert(!ftl_rwb_entry_weak(entry));
-prev = ftl_rwb_entry_from_offset(dev->rwb, prev_ppa.cache_offset);
+prev = ftl_rwb_entry_from_offset(dev->rwb, prev_addr.cache_offset);
 pthread_spin_lock(&prev->lock);
 /* Re-read the L2P under the lock to protect against updates */
 /* to this LBA from other threads */
-prev_ppa = ftl_l2p_get(dev, entry->lba);
+prev_addr = ftl_l2p_get(dev, entry->lba);
 /* If the entry is no longer in cache, another write has been */
 /* scheduled in the meantime, so we have to invalidate its LBA */
-if (!ftl_ppa_cached(prev_ppa)) {
-ftl_invalidate_addr(dev, prev_ppa);
+if (!ftl_addr_cached(prev_addr)) {
+ftl_invalidate_addr(dev, prev_addr);
 }
 /* If previous entry is part of cache, remove and invalidate it */
 if (ftl_rwb_entry_valid(prev)) {
-ftl_invalidate_addr(dev, prev->ppa);
+ftl_invalidate_addr(dev, prev->addr);
 ftl_rwb_entry_invalidate(prev);
 }
-ftl_l2p_set(dev, entry->lba, ppa);
+ftl_l2p_set(dev, entry->lba, addr);
 pthread_spin_unlock(&prev->lock);
 return;
 }
-/* Lock the band containing previous PPA. This assures atomic changes to */
+/* Lock the band containing previous physical address. This assures atomic changes to */
 /* the L2P as wall as metadata. The valid bits in metadata are used to */
 /* check weak writes validity. */
-band = ftl_band_from_ppa(dev, prev_ppa);
+band = ftl_band_from_addr(dev, prev_addr);
 pthread_spin_lock(&band->lba_map.lock);
-valid = ftl_invalidate_addr_unlocked(dev, prev_ppa);
+valid = ftl_invalidate_addr_unlocked(dev, prev_addr);
 /* If the address has been invalidated already, we don't want to update */
 /* the L2P for weak writes, as it means the write is no longer valid. */
 if (!ftl_rwb_entry_weak(entry) || valid) {
-ftl_l2p_set(dev, entry->lba, ppa);
+ftl_l2p_set(dev, entry->lba, addr);
 }
 pthread_spin_unlock(&band->lba_map.lock);
 }
 static struct ftl_io *
-ftl_io_init_child_write(struct ftl_io *parent, struct ftl_ppa ppa,
+ftl_io_init_child_write(struct ftl_io *parent, struct ftl_addr addr,
 void *data, void *md, ftl_io_fn cb)
 {
 struct ftl_io *io;
@@ -1468,7 +1470,7 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_ppa ppa,
 return NULL;
 }
-io->ppa = ppa;
+io->addr = addr;
 return io;
 }
@@ -1479,7 +1481,7 @@ ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
 struct ftl_zone *zone;
 struct ftl_wptr *wptr;
-zone = ftl_band_zone_from_ppa(io->band, io->ppa);
+zone = ftl_band_zone_from_addr(io->band, io->addr);
 wptr = ftl_wptr_from_band(io->band);
 zone->busy = false;
@@ -1497,18 +1499,18 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
 struct spdk_ftl_dev *dev = io->dev;
 struct ftl_io *child;
 int rc;
-struct ftl_ppa ppa;
+struct ftl_addr addr;
 if (spdk_likely(!wptr->direct_mode)) {
-ppa = wptr->ppa;
+addr = wptr->addr;
 } else {
 assert(io->flags & FTL_IO_DIRECT_ACCESS);
-assert(io->ppa.zone_id == wptr->band->id);
-ppa = io->ppa;
+assert(io->addr.zone_id == wptr->band->id);
+addr = io->addr;
 }
 /* Split IO to child requests and release zone immediately after child is completed */
-child = ftl_io_init_child_write(io, ppa, ftl_io_iovec_addr(io),
+child = ftl_io_init_child_write(io, addr, ftl_io_iovec_addr(io),
 ftl_io_get_md(io), ftl_io_child_write_cb);
 if (!child) {
 return -EAGAIN;
@@ -1517,14 +1519,14 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
 wptr->num_outstanding++;
 rc = spdk_nvme_ns_cmd_write_with_md(dev->ns, ftl_get_write_qpair(dev),
 ftl_io_iovec_addr(child), child->md,
-ftl_ppa_addr_pack(dev, ppa),
+ftl_addr_addr_pack(dev, addr),
 lbk_cnt, ftl_io_cmpl_cb, child, 0, 0, 0);
 if (rc) {
 wptr->num_outstanding--;
 ftl_io_fail(child, rc);
 ftl_io_complete(child);
-SPDK_ERRLOG("spdk_nvme_ns_cmd_write_with_md failed with status:%d, ppa:%lu\n",
-rc, ppa.addr);
+SPDK_ERRLOG("spdk_nvme_ns_cmd_write_with_md failed with status:%d, addr:%lu\n",
+rc, addr.addr);
 return -EIO;
 }
@@ -1561,7 +1563,7 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
 break;
 }
-ftl_trace_submission(dev, io, wptr->ppa, dev->xfer_size);
+ftl_trace_submission(dev, io, wptr->addr, dev->xfer_size);
 ftl_wptr_advance(wptr, dev->xfer_size);
 }
@@ -1602,7 +1604,7 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 struct ftl_rwb_batch *batch;
 struct ftl_rwb_entry *entry;
 struct ftl_io *io;
-struct ftl_ppa ppa, prev_ppa;
+struct ftl_addr addr, prev_addr;
 if (spdk_unlikely(!TAILQ_EMPTY(&wptr->pending_queue))) {
 io = TAILQ_FIRST(&wptr->pending_queue);
@@ -1642,7 +1644,7 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 goto error;
 }
-ppa = wptr->ppa;
+addr = wptr->addr;
 ftl_rwb_foreach(entry, batch) {
 /* Update band's relocation stats if the IO comes from reloc */
 if (entry->flags & FTL_IO_WEAK) {
@@ -1652,17 +1654,17 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 }
 }
-entry->ppa = ppa;
+entry->addr = addr;
 if (entry->lba != FTL_LBA_INVALID) {
 pthread_spin_lock(&entry->lock);
-prev_ppa = ftl_l2p_get(dev, entry->lba);
+prev_addr = ftl_l2p_get(dev, entry->lba);
 /* If the l2p was updated in the meantime, don't update band's metadata */
-if (ftl_ppa_cached(prev_ppa) && prev_ppa.cache_offset == entry->pos) {
+if (ftl_addr_cached(prev_addr) && prev_addr.cache_offset == entry->pos) {
 /* Setting entry's cache bit needs to be done after metadata */
 /* within the band is updated to make sure that writes */
 /* invalidating the entry clear the metadata as well */
-ftl_band_set_addr(wptr->band, entry->lba, entry->ppa);
+ftl_band_set_addr(wptr->band, entry->lba, entry->addr);
 ftl_rwb_entry_set_valid(entry);
 }
 pthread_spin_unlock(&entry->lock);
@@ -1671,11 +1673,11 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 ftl_trace_rwb_pop(dev, entry);
 ftl_update_rwb_stats(dev, entry);
-ppa = ftl_band_next_ppa(wptr->band, ppa, 1);
+addr = ftl_band_next_addr(wptr->band, addr, 1);
 }
-SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lx, %lx\n", wptr->ppa.addr,
-ftl_ppa_addr_pack(dev, wptr->ppa));
+SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write addr:%lx, %lx\n", wptr->addr.addr,
+ftl_addr_addr_pack(dev, wptr->addr));
 if (ftl_submit_write(wptr, io)) {
 /* TODO: we need some recovery here */
@@ -1722,8 +1724,8 @@ ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
 memcpy(entry->data, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);
 if (ftl_rwb_entry_weak(entry)) {
-entry->band = ftl_band_from_ppa(io->dev, io->ppa);
-entry->ppa = ftl_band_next_ppa(entry->band, io->ppa, io->pos);
+entry->band = ftl_band_from_addr(io->dev, io->addr);
+entry->addr = ftl_band_next_addr(entry->band, io->addr, io->pos);
 entry->band->num_reloc_blocks++;
 }
@@ -1740,7 +1742,7 @@ ftl_rwb_fill(struct ftl_io *io)
 {
 struct spdk_ftl_dev *dev = io->dev;
 struct ftl_rwb_entry *entry;
-struct ftl_ppa ppa = { .cached = 1 };
+struct ftl_addr addr = { .cached = 1 };
 int flags = ftl_rwb_flags_from_io(io);
 while (io->pos < io->lbk_cnt) {
@@ -1756,10 +1758,10 @@ ftl_rwb_fill(struct ftl_io *io)
 ftl_rwb_entry_fill(entry, io);
-ppa.cache_offset = entry->pos;
+addr.cache_offset = entry->pos;
 ftl_trace_rwb_fill(dev, io);
-ftl_update_l2p(dev, entry, ppa);
+ftl_update_l2p(dev, entry, addr);
 ftl_io_advance(io, 1);
 /* Needs to be done after L2P is updated to avoid race with */
@@ -2139,19 +2141,19 @@ ftl_process_anm_event(struct ftl_anm_event *event)
 return;
 }
-band = ftl_band_from_ppa(dev, event->ppa);
-lbkoff = ftl_band_lbkoff_from_ppa(band, event->ppa);
+band = ftl_band_from_addr(dev, event->addr);
+lbkoff = ftl_band_lbkoff_from_addr(band, event->addr);
 ftl_reloc_add(dev->reloc, band, lbkoff, event->num_lbks, 0, false);
 ftl_anm_event_complete(event);
 }
 bool
-ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa)
+ftl_addr_is_written(struct ftl_band *band, struct ftl_addr addr)
 {
-struct ftl_zone *zone = ftl_band_zone_from_ppa(band, ppa);
-return ppa.offset < zone->write_offset;
+struct ftl_zone *zone = ftl_band_zone_from_addr(band, addr);
+return addr.offset < zone->write_offset;
 }
 static void
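Much of the ftl_core.c churn above comes from the two kinds of addresses the L2P can hold: an entry either points at the media (pu/zone_id/offset) or, with the cached bit set, at a slot in the write buffer (cache_offset), cf. ftl_evict_cache_entry() and ftl_update_l2p() in the hunks above. A rough sketch of that distinction with a tiny, hypothetical L2P array standing in for ftl_l2p_get()/ftl_l2p_set(); the field widths here are made up:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ftl_addr: a media location or a write-buffer
 * slot, distinguished by the cached bit (field widths are illustrative). */
struct addr_sketch {
	union {
		struct {
			uint64_t cache_offset : 47;	/* slot in the write buffer */
			uint64_t rsvd         : 16;
			uint64_t cached       : 1;
		};
		uint64_t addr;
	};
};

static struct addr_sketch l2p[16];	/* hypothetical, tiny L2P table */

int main(void)
{
	struct addr_sketch a = { .addr = 0 };

	/* A fresh write first lands in the write buffer: the L2P entry carries
	 * the cache slot and has the cached bit set. */
	a.cached = 1;
	a.cache_offset = 42;
	l2p[5] = a;

	/* Once the data reaches the media, the entry is rewritten with the
	 * physical location and the cached bit is dropped. */
	printf("lba 5 is %s\n", l2p[5].cached ? "in the write buffer" : "on the media");
	return 0;
}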


@@ -46,7 +46,7 @@
 #include "spdk/ftl.h"
 #include "spdk/bdev.h"
-#include "ftl_ppa.h"
+#include "ftl_addr.h"
 #include "ftl_io.h"
 #include "ftl_trace.h"
@@ -78,7 +78,7 @@ struct ftl_stats {
 struct ftl_punit {
 struct spdk_ftl_dev *dev;
-struct ftl_ppa start_ppa;
+struct ftl_addr start_addr;
 };
 struct ftl_thread {
@@ -214,8 +214,8 @@ struct spdk_ftl_dev {
 /* PPA format */
 struct ftl_ppa_fmt ppaf;
-/* PPA address size */
-size_t ppa_len;
+/* Address size */
+size_t addr_len;
 /* Device's geometry */
 struct spdk_ocssd_geometry_data geo;
@@ -277,7 +277,7 @@ void ftl_io_write(struct ftl_io *io);
 int ftl_io_erase(struct ftl_io *io);
 int ftl_flush_rwb(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
 int ftl_current_limit(const struct spdk_ftl_dev *dev);
-int ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
+int ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
 int ftl_task_core(void *ctx);
 int ftl_task_read(void *ctx);
 void ftl_process_anm_event(struct ftl_anm_event *event);
@@ -290,10 +290,10 @@ int ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
 int ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);
 void ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb);
 int ftl_band_set_direct_access(struct ftl_band *band, bool access);
-int ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
+int ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_addr addr,
 struct spdk_ocssd_chunk_information_entry *info,
 unsigned int num_entries);
-bool ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa);
+bool ftl_addr_is_written(struct ftl_band *band, struct ftl_addr addr);
 int ftl_flush_active_bands(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
 int ftl_nv_cache_write_header(struct ftl_nv_cache *nv_cache, bool shutdown,
 spdk_bdev_io_completion_cb cb_fn, void *cb_arg);
@@ -303,11 +303,11 @@ int ftl_nv_cache_scrub(struct ftl_nv_cache *nv_cache, spdk_bdev_io_completion_cb
 struct spdk_io_channel *
 ftl_get_io_channel(const struct spdk_ftl_dev *dev);
-#define ftl_to_ppa(address) \
-(struct ftl_ppa) { .addr = (uint64_t)(address) }
-#define ftl_to_ppa_packed(address) \
-(struct ftl_ppa) { .pack.addr = (uint32_t)(address) }
+#define ftl_to_addr(address) \
+(struct ftl_addr) { .addr = (uint64_t)(address) }
+#define ftl_to_addr_packed(address) \
+(struct ftl_addr) { .pack.addr = (uint32_t)(address) }
 static inline struct spdk_thread *
 ftl_get_core_thread(const struct spdk_ftl_dev *dev)
@@ -334,32 +334,32 @@ ftl_get_read_qpair(const struct spdk_ftl_dev *dev)
 }
 static inline int
-ftl_ppa_packed(const struct spdk_ftl_dev *dev)
+ftl_addr_packed(const struct spdk_ftl_dev *dev)
 {
-return dev->ppa_len < 32;
+return dev->addr_len < 32;
 }
 static inline int
-ftl_ppa_invalid(struct ftl_ppa ppa)
+ftl_addr_invalid(struct ftl_addr addr)
 {
-return ppa.addr == ftl_to_ppa(FTL_PPA_INVALID).addr;
+return addr.addr == ftl_to_addr(FTL_ADDR_INVALID).addr;
 }
 static inline int
-ftl_ppa_cached(struct ftl_ppa ppa)
+ftl_addr_cached(struct ftl_addr addr)
 {
-return !ftl_ppa_invalid(ppa) && ppa.cached;
+return !ftl_addr_invalid(addr) && addr.cached;
 }
 static inline uint64_t
-ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
+ftl_addr_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
 {
 uint64_t lbk, chk, pu, grp;
-lbk = ppa.offset;
-chk = ppa.zone_id;
-pu = ppa.pu / dev->geo.num_grp;
-grp = ppa.pu % dev->geo.num_grp;
+lbk = addr.offset;
+chk = addr.zone_id;
+pu = addr.pu / dev->geo.num_grp;
+grp = addr.pu % dev->geo.num_grp;
 return (lbk << dev->ppaf.lbk_offset) |
 (chk << dev->ppaf.chk_offset) |
@@ -367,65 +367,65 @@ ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
 (grp << dev->ppaf.grp_offset);
} }
static inline struct ftl_ppa static inline struct ftl_addr
ftl_ppa_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t ppa) ftl_addr_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t addr)
{ {
struct ftl_ppa res = {}; struct ftl_addr res = {};
unsigned int pu, grp; unsigned int pu, grp;
res.offset = (ppa >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask; res.offset = (addr >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask;
res.zone_id = (ppa >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask; res.zone_id = (addr >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask;
pu = (ppa >> dev->ppaf.pu_offset) & dev->ppaf.pu_mask; pu = (addr >> dev->ppaf.pu_offset) & dev->ppaf.pu_mask;
grp = (ppa >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask; grp = (addr >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask;
res.pu = grp * dev->geo.num_pu + pu; res.pu = grp * dev->geo.num_pu + pu;
return res; return res;
} }
static inline struct ftl_ppa static inline struct ftl_addr
ftl_ppa_to_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) ftl_addr_to_packed(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{ {
struct ftl_ppa p = {}; struct ftl_addr p = {};
if (ftl_ppa_invalid(ppa)) { if (ftl_addr_invalid(addr)) {
p = ftl_to_ppa_packed(FTL_PPA_INVALID); p = ftl_to_addr_packed(FTL_ADDR_INVALID);
} else if (ftl_ppa_cached(ppa)) { } else if (ftl_addr_cached(addr)) {
p.pack.cached = 1; p.pack.cached = 1;
p.pack.cache_offset = (uint32_t) ppa.cache_offset; p.pack.cache_offset = (uint32_t) addr.cache_offset;
} else { } else {
p.pack.addr = (uint32_t) ftl_ppa_addr_pack(dev, ppa); p.pack.addr = (uint32_t) ftl_addr_addr_pack(dev, addr);
} }
return p; return p;
} }
static inline struct ftl_ppa static inline struct ftl_addr
ftl_ppa_from_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa p) ftl_addr_from_packed(const struct spdk_ftl_dev *dev, struct ftl_addr p)
{ {
struct ftl_ppa ppa = {}; struct ftl_addr addr = {};
if (p.pack.addr == (uint32_t)FTL_PPA_INVALID) { if (p.pack.addr == (uint32_t)FTL_ADDR_INVALID) {
ppa = ftl_to_ppa(FTL_PPA_INVALID); addr = ftl_to_addr(FTL_ADDR_INVALID);
} else if (p.pack.cached) { } else if (p.pack.cached) {
ppa.cached = 1; addr.cached = 1;
ppa.cache_offset = p.pack.cache_offset; addr.cache_offset = p.pack.cache_offset;
} else { } else {
ppa = ftl_ppa_addr_unpack(dev, p.pack.addr); addr = ftl_addr_addr_unpack(dev, p.pack.addr);
} }
return ppa; return addr;
} }
static inline unsigned int static inline unsigned int
ftl_ppa_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) ftl_addr_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{ {
return ppa.pu - dev->range.begin; return addr.pu - dev->range.begin;
} }
static inline int static inline int
ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) ftl_addr_in_range(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{ {
if (ppa.pu >= dev->range.begin && ppa.pu <= dev->range.end) { if (addr.pu >= dev->range.begin && addr.pu <= dev->range.end) {
return 1; return 1;
} }
@ -450,31 +450,31 @@ ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
#define _ftl_l2p_get64(l2p, off) \ #define _ftl_l2p_get64(l2p, off) \
_ftl_l2p_get(l2p, off, 64) _ftl_l2p_get(l2p, off, 64)
#define ftl_ppa_cmp(p1, p2) \ #define ftl_addr_cmp(p1, p2) \
((p1).addr == (p2).addr) ((p1).addr == (p2).addr)
static inline void static inline void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_ppa ppa) ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_addr addr)
{ {
assert(dev->num_lbas > lba); assert(dev->num_lbas > lba);
if (ftl_ppa_packed(dev)) { if (ftl_addr_packed(dev)) {
_ftl_l2p_set32(dev->l2p, lba, ftl_ppa_to_packed(dev, ppa).addr); _ftl_l2p_set32(dev->l2p, lba, ftl_addr_to_packed(dev, addr).addr);
} else { } else {
_ftl_l2p_set64(dev->l2p, lba, ppa.addr); _ftl_l2p_set64(dev->l2p, lba, addr.addr);
} }
} }
static inline struct ftl_ppa static inline struct ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba) ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{ {
assert(dev->num_lbas > lba); assert(dev->num_lbas > lba);
if (ftl_ppa_packed(dev)) { if (ftl_addr_packed(dev)) {
return ftl_ppa_from_packed(dev, ftl_to_ppa_packed( return ftl_addr_from_packed(dev, ftl_to_addr_packed(
_ftl_l2p_get32(dev->l2p, lba))); _ftl_l2p_get32(dev->l2p, lba)));
} else { } else {
return ftl_to_ppa(_ftl_l2p_get64(dev->l2p, lba)); return ftl_to_addr(_ftl_l2p_get64(dev->l2p, lba));
} }
} }
static inline size_t static inline size_t

@ -55,7 +55,7 @@ ftl_band_validate_md(struct ftl_band *band)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
struct ftl_lba_map *lba_map = &band->lba_map; struct ftl_lba_map *lba_map = &band->lba_map;
struct ftl_ppa ppa_md, ppa_l2p; struct ftl_addr addr_md, addr_l2p;
size_t i, size, seg_off; size_t i, size, seg_off;
bool valid = true; bool valid = true;
@ -72,14 +72,14 @@ ftl_band_validate_md(struct ftl_band *band)
continue; continue;
} }
ppa_md = ftl_band_ppa_from_lbkoff(band, i); addr_md = ftl_band_addr_from_lbkoff(band, i);
ppa_l2p = ftl_l2p_get(dev, lba_map->map[i]); addr_l2p = ftl_l2p_get(dev, lba_map->map[i]);
if (ppa_l2p.cached) { if (addr_l2p.cached) {
continue; continue;
} }
if (ppa_l2p.addr != ppa_md.addr) { if (addr_l2p.addr != addr_md.addr) {
valid = false; valid = false;
break; break;
} }

@ -34,7 +34,7 @@
#ifndef FTL_DEBUG_H #ifndef FTL_DEBUG_H
#define FTL_DEBUG_H #define FTL_DEBUG_H
#include "ftl_ppa.h" #include "ftl_addr.h"
#include "ftl_band.h" #include "ftl_band.h"
#include "ftl_core.h" #include "ftl_core.h"
#include "ftl_rwb.h" #include "ftl_rwb.h"
@ -51,10 +51,10 @@
#endif #endif
static inline const char * static inline const char *
ftl_ppa2str(struct ftl_ppa ppa, char *buf, size_t size) ftl_addr2str(struct ftl_addr addr, char *buf, size_t size)
{ {
snprintf(buf, size, "(pu: %u, chk: %u, lbk: %u)", snprintf(buf, size, "(pu: %u, chk: %u, lbk: %u)",
ppa.pu, ppa.zone_id, ppa.offset); addr.pu, addr.zone_id, addr.offset);
return buf; return buf;
} }

@ -203,16 +203,16 @@ out:
} }
int int
ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa, ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_addr addr,
struct spdk_ocssd_chunk_information_entry *info, struct spdk_ocssd_chunk_information_entry *info,
unsigned int num_entries) unsigned int num_entries)
{ {
volatile struct ftl_admin_cmpl cmpl = {}; volatile struct ftl_admin_cmpl cmpl = {};
uint32_t nsid = spdk_nvme_ns_get_id(dev->ns); uint32_t nsid = spdk_nvme_ns_get_id(dev->ns);
unsigned int grp = ppa.pu % dev->geo.num_grp; unsigned int grp = addr.pu % dev->geo.num_grp;
unsigned int punit = ppa.pu / dev->geo.num_grp; unsigned int punit = addr.pu / dev->geo.num_grp;
uint64_t offset = (grp * dev->geo.num_pu + punit) * uint64_t offset = (grp * dev->geo.num_pu + punit) *
dev->geo.num_chk + ppa.zone_id; dev->geo.num_chk + addr.zone_id;
int rc; int rc;
rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid, rc = spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_OCSSD_LOG_CHUNK_INFO, nsid,
@ -243,17 +243,17 @@ ftl_retrieve_punit_chunk_info(struct spdk_ftl_dev *dev, const struct ftl_punit *
{ {
uint32_t i = 0; uint32_t i = 0;
unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info); unsigned int num_entries = FTL_BLOCK_SIZE / sizeof(*info);
struct ftl_ppa chunk_ppa = punit->start_ppa; struct ftl_addr chunk_addr = punit->start_addr;
char ppa_buf[128]; char addr_buf[128];
for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_ppa.zone_id += num_entries) { for (i = 0; i < dev->geo.num_chk; i += num_entries, chunk_addr.zone_id += num_entries) {
if (num_entries > dev->geo.num_chk - i) { if (num_entries > dev->geo.num_chk - i) {
num_entries = dev->geo.num_chk - i; num_entries = dev->geo.num_chk - i;
} }
if (ftl_retrieve_chunk_info(dev, chunk_ppa, &info[i], num_entries)) { if (ftl_retrieve_chunk_info(dev, chunk_addr, &info[i], num_entries)) {
SPDK_ERRLOG("Failed to retrieve chunk information @ppa: %s\n", SPDK_ERRLOG("Failed to retrieve chunk information @addr: %s\n",
ftl_ppa2str(chunk_ppa, ppa_buf, sizeof(ppa_buf))); ftl_addr2str(chunk_addr, addr_buf, sizeof(addr_buf)));
return -1; return -1;
} }
} }
@ -364,9 +364,9 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
rc = ftl_retrieve_punit_chunk_info(dev, punit, info); rc = ftl_retrieve_punit_chunk_info(dev, punit, info);
if (rc) { if (rc) {
SPDK_ERRLOG("Failed to retrieve bbt for @ppa: %s [%lu]\n", SPDK_ERRLOG("Failed to retrieve bbt for @addr: %s [%lu]\n",
ftl_ppa2str(punit->start_ppa, buf, sizeof(buf)), ftl_addr2str(punit->start_addr, buf, sizeof(buf)),
ftl_ppa_addr_pack(dev, punit->start_ppa)); ftl_addr_addr_pack(dev, punit->start_addr));
goto out; goto out;
} }
@ -376,8 +376,8 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
zone->pos = i; zone->pos = i;
zone->state = ftl_get_zone_state(&info[j]); zone->state = ftl_get_zone_state(&info[j]);
zone->punit = punit; zone->punit = punit;
zone->start_ppa = punit->start_ppa; zone->start_addr = punit->start_addr;
zone->start_ppa.zone_id = band->id; zone->start_addr.zone_id = band->id;
zone->write_offset = ftl_dev_lbks_in_zone(dev); zone->write_offset = ftl_dev_lbks_in_zone(dev);
if (zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE) { if (zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE) {
@ -389,7 +389,7 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
for (i = 0; i < ftl_dev_num_bands(dev); ++i) { for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
band = &dev->bands[i]; band = &dev->bands[i];
band->tail_md_ppa = ftl_band_tail_md_ppa(band); band->tail_md_addr = ftl_band_tail_md_addr(band);
} }
ftl_remove_empty_bands(dev); ftl_remove_empty_bands(dev);
@ -412,8 +412,8 @@ ftl_dev_init_punits(struct spdk_ftl_dev *dev)
dev->punits[i].dev = dev; dev->punits[i].dev = dev;
punit = dev->range.begin + i; punit = dev->range.begin + i;
dev->punits[i].start_ppa.addr = 0; dev->punits[i].start_addr.addr = 0;
dev->punits[i].start_ppa.pu = punit; dev->punits[i].start_addr.pu = punit;
} }
return 0; return 0;
@ -443,10 +443,10 @@ ftl_dev_retrieve_geo(struct spdk_ftl_dev *dev)
} }
/* TODO: add sanity checks for the geo */ /* TODO: add sanity checks for the geo */
dev->ppa_len = dev->geo.lbaf.grp_len + dev->addr_len = dev->geo.lbaf.grp_len +
dev->geo.lbaf.pu_len + dev->geo.lbaf.pu_len +
dev->geo.lbaf.chk_len + dev->geo.lbaf.chk_len +
dev->geo.lbaf.lbk_len; dev->geo.lbaf.lbk_len;
dev->ppaf.lbk_offset = 0; dev->ppaf.lbk_offset = 0;
dev->ppaf.lbk_mask = (1 << dev->geo.lbaf.lbk_len) - 1; dev->ppaf.lbk_mask = (1 << dev->geo.lbaf.lbk_len) - 1;
@ -804,7 +804,7 @@ ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
return -1; return -1;
} }
addr_size = dev->ppa_len >= 32 ? 8 : 4; addr_size = dev->addr_len >= 32 ? 8 : 4;
dev->l2p = malloc(dev->num_lbas * addr_size); dev->l2p = malloc(dev->num_lbas * addr_size);
if (!dev->l2p) { if (!dev->l2p) {
SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n"); SPDK_DEBUGLOG(SPDK_LOG_FTL_INIT, "Failed to allocate l2p table\n");
@ -812,7 +812,7 @@ ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
} }
for (i = 0; i < dev->num_lbas; ++i) { for (i = 0; i < dev->num_lbas; ++i) {
ftl_l2p_set(dev, i, ftl_to_ppa(FTL_PPA_INVALID)); ftl_l2p_set(dev, i, ftl_to_addr(FTL_ADDR_INVALID));
} }
return 0; return 0;

@ -263,7 +263,7 @@ ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
io->type = type; io->type = type;
io->dev = dev; io->dev = dev;
io->lba.single = FTL_LBA_INVALID; io->lba.single = FTL_LBA_INVALID;
io->ppa.addr = FTL_PPA_INVALID; io->addr.addr = FTL_ADDR_INVALID;
io->cb_fn = fn; io->cb_fn = fn;
io->cb_ctx = ctx; io->cb_ctx = ctx;
io->trace = ftl_trace_alloc_id(dev); io->trace = ftl_trace_alloc_id(dev);
@ -356,7 +356,7 @@ ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb)
.rwb_batch = NULL, .rwb_batch = NULL,
.band = band, .band = band,
.size = sizeof(struct ftl_io), .size = sizeof(struct ftl_io),
.flags = FTL_IO_PPA_MODE, .flags = FTL_IO_PHYSICAL_MODE,
.type = FTL_IO_ERASE, .type = FTL_IO_ERASE,
.lbk_cnt = 1, .lbk_cnt = 1,
.cb_fn = cb, .cb_fn = cb,
@ -489,7 +489,7 @@ ftl_io_alloc_child(struct ftl_io *parent)
void void
ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status) ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status)
{ {
char ppa_buf[128]; char addr_buf[128];
/* TODO: add error handling for specific cases */ /* TODO: add error handling for specific cases */
if (status->status.sct == SPDK_NVME_SCT_MEDIA_ERROR && if (status->status.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
@ -497,8 +497,8 @@ ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status)
return; return;
} }
SPDK_ERRLOG("Status code type 0x%x, status code 0x%x for IO type %u @ppa: %s, lba 0x%lx, cnt %lu\n", SPDK_ERRLOG("Status code type 0x%x, status code 0x%x for IO type %u @addr: %s, lba 0x%lx, cnt %lu\n",
status->status.sct, status->status.sc, io->type, ftl_ppa2str(io->ppa, ppa_buf, sizeof(ppa_buf)), status->status.sct, status->status.sc, io->type, ftl_addr2str(io->addr, addr_buf, sizeof(addr_buf)),
ftl_io_get_lba(io, 0), io->lbk_cnt); ftl_io_get_lba(io, 0), io->lbk_cnt);
io->status = -EIO; io->status = -EIO;

@ -38,7 +38,7 @@
#include "spdk/nvme.h" #include "spdk/nvme.h"
#include "spdk/ftl.h" #include "spdk/ftl.h"
#include "ftl_ppa.h" #include "ftl_addr.h"
#include "ftl_trace.h" #include "ftl_trace.h"
struct spdk_ftl_dev; struct spdk_ftl_dev;
@ -62,15 +62,15 @@ enum ftl_io_flags {
FTL_IO_PAD = (1 << 3), FTL_IO_PAD = (1 << 3),
/* The IO operates on metadata */ /* The IO operates on metadata */
FTL_IO_MD = (1 << 4), FTL_IO_MD = (1 << 4),
/* Using PPA instead of LBA */ /* Using physical instead of logical address */
FTL_IO_PPA_MODE = (1 << 5), FTL_IO_PHYSICAL_MODE = (1 << 5),
/* Indicates that IO contains noncontiguous LBAs */ /* Indicates that IO contains noncontiguous LBAs */
FTL_IO_VECTOR_LBA = (1 << 6), FTL_IO_VECTOR_LBA = (1 << 6),
/* Indicates that IO is being retried */ /* Indicates that IO is being retried */
FTL_IO_RETRY = (1 << 7), FTL_IO_RETRY = (1 << 7),
/* The IO is directed to non-volatile cache */ /* The IO is directed to non-volatile cache */
FTL_IO_CACHE = (1 << 8), FTL_IO_CACHE = (1 << 8),
/* Indicates that PPA should be taken from IO struct, */ /* Indicates that physical address should be taken from IO struct, */
/* not assigned by wptr, only works if wptr is also in direct mode */ /* not assigned by wptr, only works if wptr is also in direct mode */
FTL_IO_DIRECT_ACCESS = (1 << 9), FTL_IO_DIRECT_ACCESS = (1 << 9),
/* Bypass the non-volatile cache */ /* Bypass the non-volatile cache */
@ -150,8 +150,8 @@ struct ftl_io {
uint64_t single; uint64_t single;
} lba; } lba;
/* First PPA */ /* First block address */
struct ftl_ppa ppa; struct ftl_addr addr;
/* Number of processed lbks */ /* Number of processed lbks */
size_t pos; size_t pos;
@ -235,15 +235,15 @@ struct ftl_md_io {
}; };
static inline bool static inline bool
ftl_io_mode_ppa(const struct ftl_io *io) ftl_io_mode_physical(const struct ftl_io *io)
{ {
return io->flags & FTL_IO_PPA_MODE; return io->flags & FTL_IO_PHYSICAL_MODE;
} }
static inline bool static inline bool
ftl_io_mode_lba(const struct ftl_io *io) ftl_io_mode_logical(const struct ftl_io *io)
{ {
return !ftl_io_mode_ppa(io); return !ftl_io_mode_physical(io);
} }
static inline bool static inline bool

@ -64,8 +64,8 @@ enum ftl_band_reloc_state {
struct ftl_reloc_move { struct ftl_reloc_move {
struct ftl_band_reloc *breloc; struct ftl_band_reloc *breloc;
/* Start ppa */ /* Start addr */
struct ftl_ppa ppa; struct ftl_addr addr;
/* Number of logical blocks */ /* Number of logical blocks */
size_t lbk_cnt; size_t lbk_cnt;
@ -205,7 +205,7 @@ ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc, struct ftl_reloc_move *mov
struct ftl_band *band = breloc->band; struct ftl_band *band = breloc->band;
breloc->num_outstanding++; breloc->num_outstanding++;
return ftl_band_read_lba_map(band, ftl_band_lbkoff_from_ppa(band, move->ppa), return ftl_band_read_lba_map(band, ftl_band_lbkoff_from_addr(band, move->addr),
move->lbk_cnt, ftl_reloc_read_lba_map_cb, move); move->lbk_cnt, ftl_reloc_read_lba_map_cb, move);
} }
@ -252,7 +252,7 @@ static void
ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status) ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status)
{ {
struct ftl_reloc_move *move = arg; struct ftl_reloc_move *move = arg;
struct ftl_ppa ppa = move->ppa; struct ftl_addr addr = move->addr;
struct ftl_band_reloc *breloc = move->breloc; struct ftl_band_reloc *breloc = move->breloc;
size_t i; size_t i;
@ -265,8 +265,8 @@ ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status)
} }
for (i = 0; i < move->lbk_cnt; ++i) { for (i = 0; i < move->lbk_cnt; ++i) {
ppa.offset = move->ppa.offset + i; addr.offset = move->addr.offset + i;
size_t lbkoff = ftl_band_lbkoff_from_ppa(breloc->band, ppa); size_t lbkoff = ftl_band_lbkoff_from_addr(breloc->band, addr);
ftl_reloc_clr_lbk(breloc, lbkoff); ftl_reloc_clr_lbk(breloc, lbkoff);
} }
@ -322,9 +322,9 @@ ftl_reloc_iter_next_zone(struct ftl_band_reloc *breloc)
static int static int
ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff) ftl_reloc_lbk_valid(struct ftl_band_reloc *breloc, size_t lbkoff)
{ {
struct ftl_ppa ppa = ftl_band_ppa_from_lbkoff(breloc->band, lbkoff); struct ftl_addr addr = ftl_band_addr_from_lbkoff(breloc->band, lbkoff);
return ftl_ppa_is_written(breloc->band, ppa) && return ftl_addr_is_written(breloc->band, addr) &&
spdk_bit_array_get(breloc->reloc_map, lbkoff) && spdk_bit_array_get(breloc->reloc_map, lbkoff) &&
ftl_band_lbkoff_valid(breloc->band, lbkoff); ftl_band_lbkoff_valid(breloc->band, lbkoff);
} }
@ -382,7 +382,7 @@ ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
static size_t static size_t
ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc, ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc,
size_t num_lbk, struct ftl_ppa *ppa) size_t num_lbk, struct ftl_addr *addr)
{ {
size_t lbkoff, lbk_cnt = 0; size_t lbkoff, lbk_cnt = 0;
@ -390,7 +390,7 @@ ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc,
return 0; return 0;
} }
*ppa = ftl_band_ppa_from_lbkoff(breloc->band, lbkoff); *addr = ftl_band_addr_from_lbkoff(breloc->band, lbkoff);
for (lbk_cnt = 1; lbk_cnt < num_lbk; lbk_cnt++) { for (lbk_cnt = 1; lbk_cnt < num_lbk; lbk_cnt++) {
if (!ftl_reloc_iter_next(breloc, &lbkoff)) { if (!ftl_reloc_iter_next(breloc, &lbkoff)) {
@ -402,13 +402,13 @@ ftl_reloc_find_valid_lbks(struct ftl_band_reloc *breloc,
} }
static size_t static size_t
ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_ppa *ppa) ftl_reloc_next_lbks(struct ftl_band_reloc *breloc, struct ftl_addr *addr)
{ {
size_t i, lbk_cnt = 0; size_t i, lbk_cnt = 0;
struct spdk_ftl_dev *dev = breloc->parent->dev; struct spdk_ftl_dev *dev = breloc->parent->dev;
for (i = 0; i < ftl_dev_num_punits(dev); ++i) { for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, ppa); lbk_cnt = ftl_reloc_find_valid_lbks(breloc, breloc->parent->xfer_size, addr);
ftl_reloc_iter_next_zone(breloc); ftl_reloc_iter_next_zone(breloc);
if (lbk_cnt || ftl_reloc_iter_done(breloc)) { if (lbk_cnt || ftl_reloc_iter_done(breloc)) {
@ -424,13 +424,13 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
ftl_io_fn fn, enum ftl_io_type io_type, int flags) ftl_io_fn fn, enum ftl_io_type io_type, int flags)
{ {
size_t lbkoff, i; size_t lbkoff, i;
struct ftl_ppa ppa = move->ppa; struct ftl_addr addr = move->addr;
struct ftl_io *io = NULL; struct ftl_io *io = NULL;
struct ftl_io_init_opts opts = { struct ftl_io_init_opts opts = {
.dev = breloc->parent->dev, .dev = breloc->parent->dev,
.band = breloc->band, .band = breloc->band,
.size = sizeof(*io), .size = sizeof(*io),
.flags = flags | FTL_IO_INTERNAL | FTL_IO_PPA_MODE, .flags = flags | FTL_IO_INTERNAL | FTL_IO_PHYSICAL_MODE,
.type = io_type, .type = io_type,
.lbk_cnt = move->lbk_cnt, .lbk_cnt = move->lbk_cnt,
.data = move->data, .data = move->data,
@ -443,11 +443,11 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
} }
io->cb_ctx = move; io->cb_ctx = move;
io->ppa = move->ppa; io->addr = move->addr;
if (flags & FTL_IO_VECTOR_LBA) { if (flags & FTL_IO_VECTOR_LBA) {
for (i = 0; i < io->lbk_cnt; ++i, ++ppa.offset) { for (i = 0; i < io->lbk_cnt; ++i, ++addr.offset) {
lbkoff = ftl_band_lbkoff_from_ppa(breloc->band, ppa); lbkoff = ftl_band_lbkoff_from_addr(breloc->band, addr);
if (!ftl_band_lbkoff_valid(breloc->band, lbkoff)) { if (!ftl_band_lbkoff_valid(breloc->band, lbkoff)) {
io->lba.vector[i] = FTL_LBA_INVALID; io->lba.vector[i] = FTL_LBA_INVALID;
@ -485,11 +485,11 @@ ftl_reloc_write(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
static int static int
ftl_reloc_read(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move) ftl_reloc_read(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{ {
struct ftl_ppa ppa = {}; struct ftl_addr addr = {};
move->lbk_cnt = ftl_reloc_next_lbks(breloc, &ppa); move->lbk_cnt = ftl_reloc_next_lbks(breloc, &addr);
move->breloc = breloc; move->breloc = breloc;
move->ppa = ppa; move->addr = addr;
if (!move->lbk_cnt) { if (!move->lbk_cnt) {
return 0; return 0;

@ -383,7 +383,7 @@ static int
ftl_restore_l2p(struct ftl_band *band) ftl_restore_l2p(struct ftl_band *band)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
struct ftl_ppa ppa; struct ftl_addr addr;
uint64_t lba; uint64_t lba;
size_t i; size_t i;
@ -397,15 +397,15 @@ ftl_restore_l2p(struct ftl_band *band)
return -1; return -1;
} }
ppa = ftl_l2p_get(dev, lba); addr = ftl_l2p_get(dev, lba);
if (!ftl_ppa_invalid(ppa)) { if (!ftl_addr_invalid(addr)) {
ftl_invalidate_addr(dev, ppa); ftl_invalidate_addr(dev, addr);
} }
ppa = ftl_band_ppa_from_lbkoff(band, i); addr = ftl_band_addr_from_lbkoff(band, i);
ftl_band_set_addr(band, lba, ppa); ftl_band_set_addr(band, lba, addr);
ftl_l2p_set(dev, lba, ppa); ftl_l2p_set(dev, lba, addr);
} }
return 0; return 0;
@ -1115,11 +1115,11 @@ ftl_pad_zone_pad_finish(struct ftl_restore_band *rband, bool direct_access)
static struct ftl_io * static struct ftl_io *
ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer, ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
struct ftl_ppa ppa) struct ftl_addr addr)
{ {
struct ftl_band *band = rband->band; struct ftl_band *band = rband->band;
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
int flags = FTL_IO_PAD | FTL_IO_INTERNAL | FTL_IO_PPA_MODE | FTL_IO_MD | int flags = FTL_IO_PAD | FTL_IO_INTERNAL | FTL_IO_PHYSICAL_MODE | FTL_IO_MD |
FTL_IO_DIRECT_ACCESS; FTL_IO_DIRECT_ACCESS;
struct ftl_io_init_opts opts = { struct ftl_io_init_opts opts = {
.dev = dev, .dev = dev,
@ -1142,7 +1142,7 @@ ftl_restore_init_pad_io(struct ftl_restore_band *rband, void *buffer,
return NULL; return NULL;
} }
io->ppa = ppa; io->addr = addr;
rband->parent->num_ios++; rband->parent->num_ios++;
return io; return io;
@ -1164,13 +1164,13 @@ ftl_pad_zone_cb(struct ftl_io *io, void *arg, int status)
goto end; goto end;
} }
if (io->ppa.offset + io->lbk_cnt == band->dev->geo.clba) { if (io->addr.offset + io->lbk_cnt == band->dev->geo.clba) {
zone = ftl_band_zone_from_ppa(band, io->ppa); zone = ftl_band_zone_from_addr(band, io->addr);
zone->state = SPDK_BDEV_ZONE_STATE_CLOSED; zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
} else { } else {
struct ftl_ppa ppa = io->ppa; struct ftl_addr addr = io->addr;
ppa.offset += io->lbk_cnt; addr.offset += io->lbk_cnt;
new_io = ftl_restore_init_pad_io(rband, io->iov[0].iov_base, ppa); new_io = ftl_restore_init_pad_io(rband, io->iov[0].iov_base, addr);
if (spdk_unlikely(!new_io)) { if (spdk_unlikely(!new_io)) {
restore->pad_status = -ENOMEM; restore->pad_status = -ENOMEM;
goto end; goto end;
@ -1194,7 +1194,7 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
void *buffer = NULL; void *buffer = NULL;
struct ftl_io *io; struct ftl_io *io;
struct ftl_ppa ppa; struct ftl_addr addr;
size_t i; size_t i;
int rc = 0; int rc = 0;
@ -1219,12 +1219,12 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
continue; continue;
} }
rc = ftl_retrieve_chunk_info(dev, band->zone_buf[i].start_ppa, &info, 1); rc = ftl_retrieve_chunk_info(dev, band->zone_buf[i].start_addr, &info, 1);
if (spdk_unlikely(rc)) { if (spdk_unlikely(rc)) {
goto error; goto error;
} }
ppa = band->zone_buf[i].start_ppa; addr = band->zone_buf[i].start_addr;
ppa.offset = info.wp; addr.offset = info.wp;
buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * dev->xfer_size, 0, NULL); buffer = spdk_dma_zmalloc(FTL_BLOCK_SIZE * dev->xfer_size, 0, NULL);
if (spdk_unlikely(!buffer)) { if (spdk_unlikely(!buffer)) {
@ -1232,7 +1232,7 @@ ftl_restore_pad_band(struct ftl_restore_band *rband)
goto error; goto error;
} }
io = ftl_restore_init_pad_io(rband, buffer, ppa); io = ftl_restore_init_pad_io(rband, buffer, addr);
if (spdk_unlikely(!io)) { if (spdk_unlikely(!io)) {
rc = -ENOMEM; rc = -ENOMEM;
spdk_dma_free(buffer); spdk_dma_free(buffer);
@ -1312,7 +1312,7 @@ ftl_restore_tail_md(struct ftl_restore_band *rband)
return -ENOMEM; return -ENOMEM;
} }
if (ftl_band_read_tail_md(band, band->tail_md_ppa, ftl_restore_tail_md_cb, rband)) { if (ftl_band_read_tail_md(band, band->tail_md_addr, ftl_restore_tail_md_cb, rband)) {
SPDK_ERRLOG("Failed to send tail metadata read\n"); SPDK_ERRLOG("Failed to send tail metadata read\n");
ftl_restore_complete(restore, -EIO); ftl_restore_complete(restore, -EIO);
return -EIO; return -EIO;

@ -38,7 +38,7 @@
#include "spdk/queue.h" #include "spdk/queue.h"
#include "ftl_io.h" #include "ftl_io.h"
#include "ftl_ppa.h" #include "ftl_addr.h"
#include "ftl_trace.h" #include "ftl_trace.h"
struct ftl_rwb; struct ftl_rwb;
@ -64,7 +64,7 @@ struct ftl_rwb_entry {
uint64_t lba; uint64_t lba;
/* Physical address */ /* Physical address */
struct ftl_ppa ppa; struct ftl_addr addr;
/* Band the data is moved from (only valid when relocating data) */ /* Band the data is moved from (only valid when relocating data) */
struct ftl_band *band; struct ftl_band *band;
@ -84,7 +84,7 @@ struct ftl_rwb_entry {
/* Flags */ /* Flags */
unsigned int flags; unsigned int flags;
/* Indicates whether the entry is part of cache and is assigned a PPA */ /* Indicates whether the entry is part of cache and is assigned a physical address */
bool valid; bool valid;
/* Trace group id */ /* Trace group id */

@ -101,20 +101,20 @@ SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_sched"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_sched");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SCHEDULE(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SCHEDULE(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_submit"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_submit");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SUBMISSION(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SUBMISSION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_cmpl"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_cmpl");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_COMPLETION(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_COMPLETION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_sched"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_sched");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SCHEDULE(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SCHEDULE(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_submit"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_submit");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SUBMISSION(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SUBMISSION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_cmpl"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_cmpl");
spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_COMPLETION(i), spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_COMPLETION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
@ -124,7 +124,7 @@ SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_submit"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_submit");
spdk_trace_register_description(descbuf, FTL_TRACE_READ_SUBMISSION(i), spdk_trace_register_description(descbuf, FTL_TRACE_READ_SUBMISSION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_cmpl_invld"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_cmpl_invld");
spdk_trace_register_description(descbuf, FTL_TRACE_READ_COMPLETION_INVALID(i), spdk_trace_register_description(descbuf, FTL_TRACE_READ_COMPLETION_INVALID(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
@ -143,17 +143,17 @@ SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_submit"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_submit");
spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SUBMISSION(i), spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SUBMISSION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_cmpl"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_cmpl");
spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_COMPLETION(i), spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_COMPLETION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "lba: "); OWNER_FTL, OBJECT_NONE, 0, 0, "lba: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_submit"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_submit");
spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_SUBMISSION(i), spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_SUBMISSION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_cmpl"); snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_cmpl");
spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_COMPLETION(i), spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_COMPLETION(i),
OWNER_FTL, OBJECT_NONE, 0, 0, "ppa: "); OWNER_FTL, OBJECT_NONE, 0, 0, "addr: ");
} }
} }
@ -249,7 +249,7 @@ ftl_trace_rwb_pop(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry)
tpoint_id = FTL_TRACE_RWB_POP(FTL_TRACE_SOURCE_USER); tpoint_id = FTL_TRACE_RWB_POP(FTL_TRACE_SOURCE_USER);
} }
spdk_trace_record(tpoint_id, entry->trace, 0, entry->ppa.addr, entry->lba); spdk_trace_record(tpoint_id, entry->trace, 0, entry->addr.addr, entry->lba);
} }
void void
@ -302,8 +302,8 @@ ftl_trace_completion(struct spdk_ftl_dev *dev, const struct ftl_io *io,
} }
void void
ftl_trace_submission(struct spdk_ftl_dev *dev, const struct ftl_io *io, struct ftl_ppa ppa, ftl_trace_submission(struct spdk_ftl_dev *dev, const struct ftl_io *io, struct ftl_addr addr,
size_t ppa_cnt) size_t addr_cnt)
{ {
uint16_t tpoint_id = 0, source; uint16_t tpoint_id = 0, source;
@ -337,7 +337,7 @@ ftl_trace_submission(struct spdk_ftl_dev *dev, const struct ftl_io *io, struct f
} }
} }
spdk_trace_record(tpoint_id, io->trace, ppa_cnt, 0, ppa.addr); spdk_trace_record(tpoint_id, io->trace, addr_cnt, 0, addr.addr);
} }
void void

@ -34,7 +34,7 @@
#ifndef FTL_TRACE_H #ifndef FTL_TRACE_H
#define FTL_TRACE_H #define FTL_TRACE_H
#include "ftl_ppa.h" #include "ftl_addr.h"
#define FTL_TRACE_INVALID_ID ((uint64_t) -1) #define FTL_TRACE_INVALID_ID ((uint64_t) -1)
@ -63,7 +63,7 @@ void ftl_trace_rwb_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_rwb_pop(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry); void ftl_trace_rwb_pop(struct spdk_ftl_dev *dev, const struct ftl_rwb_entry *entry);
void ftl_trace_submission(struct spdk_ftl_dev *dev, void ftl_trace_submission(struct spdk_ftl_dev *dev,
const struct ftl_io *io, const struct ftl_io *io,
struct ftl_ppa ppa, size_t ppa_cnt); struct ftl_addr addr, size_t addr_cnt);
void ftl_trace_completion(struct spdk_ftl_dev *dev, void ftl_trace_completion(struct spdk_ftl_dev *dev,
const struct ftl_io *io, const struct ftl_io *io,
enum ftl_trace_completion type); enum ftl_trace_completion type);

@ -41,7 +41,7 @@ struct spdk_ftl_dev *test_init_ftl_dev(const struct spdk_ocssd_geometry_data *ge
struct ftl_band *test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id); struct ftl_band *test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id);
void test_free_ftl_dev(struct spdk_ftl_dev *dev); void test_free_ftl_dev(struct spdk_ftl_dev *dev);
void test_free_ftl_band(struct ftl_band *band); void test_free_ftl_band(struct ftl_band *band);
uint64_t test_offset_from_ppa(struct ftl_ppa ppa, struct ftl_band *band); uint64_t test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band);
struct spdk_ftl_dev * struct spdk_ftl_dev *
test_init_ftl_dev(const struct spdk_ocssd_geometry_data *geo, test_init_ftl_dev(const struct spdk_ocssd_geometry_data *geo,
@ -73,7 +73,7 @@ test_init_ftl_dev(const struct spdk_ocssd_geometry_data *geo,
for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) { for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
punit = range->begin + i; punit = range->begin + i;
dev->punits[i].dev = dev; dev->punits[i].dev = dev;
dev->punits[i].start_ppa.pu = punit; dev->punits[i].start_addr.pu = punit;
} }
LIST_INIT(&dev->free_bands); LIST_INIT(&dev->free_bands);
@ -113,8 +113,8 @@ test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
zone->pos = i; zone->pos = i;
zone->state = SPDK_BDEV_ZONE_STATE_CLOSED; zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
zone->punit = &dev->punits[i]; zone->punit = &dev->punits[i];
zone->start_ppa = dev->punits[i].start_ppa; zone->start_addr = dev->punits[i].start_addr;
zone->start_ppa.zone_id = band->id; zone->start_addr.zone_id = band->id;
CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq); CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
band->num_zones++; band->num_zones++;
} }
@ -147,14 +147,14 @@ test_free_ftl_band(struct ftl_band *band)
} }
uint64_t uint64_t
test_offset_from_ppa(struct ftl_ppa ppa, struct ftl_band *band) test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band)
{ {
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
unsigned int punit; unsigned int punit;
/* TODO: ftl_ppa_flatten_punit should return uint32_t */ /* TODO: ftl_addr_flatten_punit should return uint32_t */
punit = ftl_ppa_flatten_punit(dev, ppa); punit = ftl_addr_flatten_punit(dev, addr);
CU_ASSERT_EQUAL(ppa.zone_id, band->id); CU_ASSERT_EQUAL(addr.zone_id, band->id);
return punit * ftl_dev_lbks_in_zone(dev) + ppa.offset; return punit * ftl_dev_lbks_in_zone(dev) + addr.offset;
} }

@ -78,27 +78,27 @@ cleanup_band(void)
test_free_ftl_dev(g_dev); test_free_ftl_dev(g_dev);
} }
static struct ftl_ppa static struct ftl_addr
ppa_from_punit(uint64_t punit) addr_from_punit(uint64_t punit)
{ {
struct ftl_ppa ppa = {}; struct ftl_addr addr = {};
ppa.pu = punit; addr.pu = punit;
return ppa; return addr;
} }
static void static void
test_band_lbkoff_from_ppa_base(void) test_band_lbkoff_from_addr_base(void)
{ {
struct ftl_ppa ppa; struct ftl_addr addr;
uint64_t offset, i, flat_lun = 0; uint64_t offset, i, flat_lun = 0;
setup_band(); setup_band();
for (i = g_range.begin; i < g_range.end; ++i) { for (i = g_range.begin; i < g_range.end; ++i) {
ppa = ppa_from_punit(i); addr = addr_from_punit(i);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
offset = ftl_band_lbkoff_from_ppa(g_band, ppa); offset = ftl_band_lbkoff_from_addr(g_band, addr);
CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_zone(g_dev)); CU_ASSERT_EQUAL(offset, flat_lun * ftl_dev_lbks_in_zone(g_dev));
flat_lun++; flat_lun++;
} }
@ -106,21 +106,21 @@ test_band_lbkoff_from_ppa_base(void)
} }
static void static void
test_band_lbkoff_from_ppa_lbk(void) test_band_lbkoff_from_addr_offset(void)
{ {
struct ftl_ppa ppa; struct ftl_addr addr;
uint64_t offset, expect, i, j; uint64_t offset, expect, i, j;
setup_band(); setup_band();
for (i = g_range.begin; i < g_range.end; ++i) { for (i = g_range.begin; i < g_range.end; ++i) {
for (j = 0; j < g_geo.clba; ++j) { for (j = 0; j < g_geo.clba; ++j) {
ppa = ppa_from_punit(i); addr = addr_from_punit(i);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
ppa.offset = j; addr.offset = j;
offset = ftl_band_lbkoff_from_ppa(g_band, ppa); offset = ftl_band_lbkoff_from_addr(g_band, addr);
expect = test_offset_from_ppa(ppa, g_band); expect = test_offset_from_addr(addr, g_band);
CU_ASSERT_EQUAL(offset, expect); CU_ASSERT_EQUAL(offset, expect);
} }
} }
@ -128,22 +128,22 @@ test_band_lbkoff_from_ppa_lbk(void)
} }
static void static void
test_band_ppa_from_lbkoff(void) test_band_addr_from_lbkoff(void)
{ {
struct ftl_ppa ppa, expect; struct ftl_addr addr, expect;
uint64_t offset, i, j; uint64_t offset, i, j;
setup_band(); setup_band();
for (i = g_range.begin; i < g_range.end; ++i) { for (i = g_range.begin; i < g_range.end; ++i) {
for (j = 0; j < g_geo.clba; ++j) { for (j = 0; j < g_geo.clba; ++j) {
expect = ppa_from_punit(i); expect = addr_from_punit(i);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = j; expect.offset = j;
offset = ftl_band_lbkoff_from_ppa(g_band, expect); offset = ftl_band_lbkoff_from_addr(g_band, expect);
ppa = ftl_band_ppa_from_lbkoff(g_band, offset); addr = ftl_band_addr_from_lbkoff(g_band, offset);
CU_ASSERT_EQUAL(ppa.addr, expect.addr); CU_ASSERT_EQUAL(addr.addr, expect.addr);
} }
} }
cleanup_band(); cleanup_band();
@ -153,31 +153,31 @@ static void
test_band_set_addr(void) test_band_set_addr(void)
{ {
struct ftl_lba_map *lba_map; struct ftl_lba_map *lba_map;
struct ftl_ppa ppa; struct ftl_addr addr;
uint64_t offset = 0; uint64_t offset = 0;
setup_band(); setup_band();
lba_map = &g_band->lba_map; lba_map = &g_band->lba_map;
ppa = ppa_from_punit(g_range.begin); addr = addr_from_punit(g_range.begin);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
CU_ASSERT_EQUAL(lba_map->num_vld, 0); CU_ASSERT_EQUAL(lba_map->num_vld, 0);
offset = test_offset_from_ppa(ppa, g_band); offset = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa); ftl_band_set_addr(g_band, TEST_LBA, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 1); CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA); CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset)); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
ppa.pu++; addr.pu++;
offset = test_offset_from_ppa(ppa, g_band); offset = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa); ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 2); CU_ASSERT_EQUAL(lba_map->num_vld, 2);
CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1); CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset)); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
ppa.pu--; addr.pu--;
offset = test_offset_from_ppa(ppa, g_band); offset = test_offset_from_addr(addr, g_band);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset)); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
cleanup_band(); cleanup_band();
} }
@ -186,31 +186,31 @@ static void
test_invalidate_addr(void) test_invalidate_addr(void)
{ {
struct ftl_lba_map *lba_map; struct ftl_lba_map *lba_map;
struct ftl_ppa ppa; struct ftl_addr addr;
uint64_t offset[2]; uint64_t offset[2];
setup_band(); setup_band();
lba_map = &g_band->lba_map; lba_map = &g_band->lba_map;
ppa = ppa_from_punit(g_range.begin); addr = addr_from_punit(g_range.begin);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
offset[0] = test_offset_from_ppa(ppa, g_band); offset[0] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa); ftl_band_set_addr(g_band, TEST_LBA, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 1); CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0])); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
ftl_invalidate_addr(g_band->dev, ppa); ftl_invalidate_addr(g_band->dev, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 0); CU_ASSERT_EQUAL(lba_map->num_vld, 0);
CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0])); CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0]));
offset[0] = test_offset_from_ppa(ppa, g_band); offset[0] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa); ftl_band_set_addr(g_band, TEST_LBA, addr);
ppa.pu++; addr.pu++;
offset[1] = test_offset_from_ppa(ppa, g_band); offset[1] = test_offset_from_addr(addr, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa); ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 2); CU_ASSERT_EQUAL(lba_map->num_vld, 2);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0])); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1])); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1]));
ftl_invalidate_addr(g_band->dev, ppa); ftl_invalidate_addr(g_band->dev, addr);
CU_ASSERT_EQUAL(lba_map->num_vld, 1); CU_ASSERT_EQUAL(lba_map->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0])); CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1])); CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1]));
@ -218,75 +218,75 @@ test_invalidate_addr(void)
} }
static void static void
test_next_xfer_ppa(void) test_next_xfer_addr(void)
{ {
struct ftl_ppa ppa, result, expect; struct ftl_addr addr, result, expect;
setup_band(); setup_band();
/* Verify simple one lbk increment */ /* Verify simple one lbk increment */
ppa = ppa_from_punit(g_range.begin); addr = addr_from_punit(g_range.begin);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
ppa.offset = 0; addr.offset = 0;
expect = ppa; expect = addr;
expect.offset = 1; expect.offset = 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 1); result = ftl_band_next_xfer_addr(g_band, addr, 1);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Verify jumping between zones */ /* Verify jumping between zones */
expect = ppa_from_punit(g_range.begin + 1); expect = addr_from_punit(g_range.begin + 1);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size); result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Verify jumping works with unaligned offsets */ /* Verify jumping works with unaligned offsets */
expect = ppa_from_punit(g_range.begin + 1); expect = addr_from_punit(g_range.begin + 1);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = 3; expect.offset = 3;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 3); result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 3);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Verify jumping from last zone to the first one */ /* Verify jumping from last zone to the first one */
expect = ppa_from_punit(g_range.begin); expect = addr_from_punit(g_range.begin);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = g_dev->xfer_size; expect.offset = g_dev->xfer_size;
ppa = ppa_from_punit(g_range.end); addr = addr_from_punit(g_range.end);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size); result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Verify jumping from last zone to the first one with unaligned offset */ /* Verify jumping from last zone to the first one with unaligned offset */
expect = ppa_from_punit(g_range.begin); expect = addr_from_punit(g_range.begin);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = g_dev->xfer_size + 2; expect.offset = g_dev->xfer_size + 2;
ppa = ppa_from_punit(g_range.end); addr = addr_from_punit(g_range.end);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 2); result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 2);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Verify large offset spanning across the whole band multiple times */ /* Verify large offset spanning across the whole band multiple times */
expect = ppa_from_punit(g_range.begin); expect = addr_from_punit(g_range.begin);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = g_dev->xfer_size * 5 + 4; expect.offset = g_dev->xfer_size * 5 + 4;
ppa = ppa_from_punit(g_range.begin); addr = addr_from_punit(g_range.begin);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
ppa.offset = g_dev->xfer_size * 2 + 1; addr.offset = g_dev->xfer_size * 2 + 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 3 * g_dev->xfer_size * result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
ftl_dev_num_punits(g_dev) + 3); ftl_dev_num_punits(g_dev) + 3);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
/* Remove one zone and verify it's skipped properly */ /* Remove one zone and verify it's skipped properly */
g_band->zone_buf[1].state = SPDK_BDEV_ZONE_STATE_OFFLINE; g_band->zone_buf[1].state = SPDK_BDEV_ZONE_STATE_OFFLINE;
CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq); CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
g_band->num_zones--; g_band->num_zones--;
expect = ppa_from_punit(g_range.begin + 2); expect = addr_from_punit(g_range.begin + 2);
expect.zone_id = TEST_BAND_IDX; expect.zone_id = TEST_BAND_IDX;
expect.offset = g_dev->xfer_size * 5 + 4; expect.offset = g_dev->xfer_size * 5 + 4;
ppa = ppa_from_punit(g_range.begin); addr = addr_from_punit(g_range.begin);
ppa.zone_id = TEST_BAND_IDX; addr.zone_id = TEST_BAND_IDX;
ppa.offset = g_dev->xfer_size * 2 + 1; addr.offset = g_dev->xfer_size * 2 + 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 3 * g_dev->xfer_size * result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
(ftl_dev_num_punits(g_dev) - 1) + g_dev->xfer_size + 3); (ftl_dev_num_punits(g_dev) - 1) + g_dev->xfer_size + 3);
CU_ASSERT_EQUAL(result.addr, expect.addr); CU_ASSERT_EQUAL(result.addr, expect.addr);
cleanup_band(); cleanup_band();
} }
@ -308,18 +308,18 @@ main(int argc, char **argv)
} }
if ( if (
CU_add_test(suite, "test_band_lbkoff_from_ppa_base", CU_add_test(suite, "test_band_lbkoff_from_addr_base",
test_band_lbkoff_from_ppa_base) == NULL test_band_lbkoff_from_addr_base) == NULL
|| CU_add_test(suite, "test_band_lbkoff_from_ppa_lbk", || CU_add_test(suite, "test_band_lbkoff_from_addr_offset",
test_band_lbkoff_from_ppa_lbk) == NULL test_band_lbkoff_from_addr_offset) == NULL
|| CU_add_test(suite, "test_band_ppa_from_lbkoff", || CU_add_test(suite, "test_band_addr_from_lbkoff",
test_band_ppa_from_lbkoff) == NULL test_band_addr_from_lbkoff) == NULL
|| CU_add_test(suite, "test_band_set_addr", || CU_add_test(suite, "test_band_set_addr",
test_band_set_addr) == NULL test_band_set_addr) == NULL
|| CU_add_test(suite, "test_invalidate_addr", || CU_add_test(suite, "test_invalidate_addr",
test_invalidate_addr) == NULL test_invalidate_addr) == NULL
|| CU_add_test(suite, "test_next_xfer_ppa", || CU_add_test(suite, "test_next_xfer_addr",
test_next_xfer_ppa) == NULL test_next_xfer_addr) == NULL
) { ) {
CU_cleanup_registry(); CU_cleanup_registry();
return CU_get_error(); return CU_get_error();

@ -61,7 +61,7 @@ clean_l2p(void)
{ {
size_t l2p_elem_size; size_t l2p_elem_size;
if (ftl_ppa_packed(g_dev)) { if (ftl_addr_packed(g_dev)) {
l2p_elem_size = sizeof(uint32_t); l2p_elem_size = sizeof(uint32_t);
} else { } else {
l2p_elem_size = sizeof(uint64_t); l2p_elem_size = sizeof(uint64_t);
@ -81,7 +81,7 @@ setup_l2p_32bit(void)
g_dev->ppaf.pu_mask = (1 << 3) - 1; g_dev->ppaf.pu_mask = (1 << 3) - 1;
g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3; g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3;
g_dev->ppaf.grp_mask = (1 << 2) - 1; g_dev->ppaf.grp_mask = (1 << 2) - 1;
g_dev->ppa_len = g_dev->ppaf.grp_offset + 2; g_dev->addr_len = g_dev->ppaf.grp_offset + 2;
return 0; return 0;
} }
@ -98,7 +98,7 @@ setup_l2p_64bit(void)
g_dev->ppaf.pu_mask = (1 << 3) - 1; g_dev->ppaf.pu_mask = (1 << 3) - 1;
g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3; g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3;
g_dev->ppaf.grp_mask = (1 << 2) - 1; g_dev->ppaf.grp_mask = (1 << 2) - 1;
g_dev->ppa_len = g_dev->ppaf.grp_offset + 2; g_dev->addr_len = g_dev->ppaf.grp_offset + 2;
return 0; return 0;
} }
@ -113,146 +113,146 @@ cleanup(void)
} }
static void static void
test_ppa_pack32(void) test_addr_pack32(void)
{ {
struct ftl_ppa orig = {}, ppa; struct ftl_addr orig = {}, addr;
/* Check valid address transformation */ /* Check valid address transformation */
orig.offset = 4; orig.offset = 4;
orig.zone_id = 3; orig.zone_id = 3;
orig.pu = 2; orig.pu = 2;
ppa = ftl_ppa_to_packed(g_dev, orig); addr = ftl_addr_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.addr <= UINT32_MAX); CU_ASSERT_TRUE(addr.addr <= UINT32_MAX);
CU_ASSERT_FALSE(ppa.pack.cached); CU_ASSERT_FALSE(addr.pack.cached);
ppa = ftl_ppa_from_packed(g_dev, ppa); addr = ftl_addr_from_packed(g_dev, addr);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
/* Check invalid address transformation */ /* Check invalid address transformation */
orig = ftl_to_ppa(FTL_PPA_INVALID); orig = ftl_to_addr(FTL_ADDR_INVALID);
ppa = ftl_ppa_to_packed(g_dev, orig); addr = ftl_addr_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.addr <= UINT32_MAX); CU_ASSERT_TRUE(addr.addr <= UINT32_MAX);
ppa = ftl_ppa_from_packed(g_dev, ppa); addr = ftl_addr_from_packed(g_dev, addr);
CU_ASSERT_TRUE(ftl_ppa_invalid(ppa)); CU_ASSERT_TRUE(ftl_addr_invalid(addr));
/* Check cached entry offset transformation */ /* Check cached entry offset transformation */
orig.cached = 1; orig.cached = 1;
orig.cache_offset = 1024; orig.cache_offset = 1024;
ppa = ftl_ppa_to_packed(g_dev, orig); addr = ftl_addr_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.addr <= UINT32_MAX); CU_ASSERT_TRUE(addr.addr <= UINT32_MAX);
CU_ASSERT_TRUE(ppa.pack.cached); CU_ASSERT_TRUE(addr.pack.cached);
ppa = ftl_ppa_from_packed(g_dev, ppa); addr = ftl_addr_from_packed(g_dev, addr);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
CU_ASSERT_TRUE(ftl_ppa_cached(ppa)); CU_ASSERT_TRUE(ftl_addr_cached(addr));
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
clean_l2p(); clean_l2p();
} }
static void static void
test_ppa_pack64(void) test_addr_pack64(void)
{ {
struct ftl_ppa orig = {}, ppa; struct ftl_addr orig = {}, addr;
orig.offset = 4; orig.offset = 4;
orig.zone_id = 3; orig.zone_id = 3;
orig.pu = 2; orig.pu = 2;
/* Check valid address transformation */ /* Check valid address transformation */
ppa.addr = ftl_ppa_addr_pack(g_dev, orig); addr.addr = ftl_addr_addr_pack(g_dev, orig);
ppa = ftl_ppa_addr_unpack(g_dev, ppa.addr); addr = ftl_addr_addr_unpack(g_dev, addr.addr);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
orig.offset = 0x7ea0be0f; orig.offset = 0x7ea0be0f;
orig.zone_id = 0x6; orig.zone_id = 0x6;
orig.pu = 0x4; orig.pu = 0x4;
ppa.addr = ftl_ppa_addr_pack(g_dev, orig); addr.addr = ftl_addr_addr_pack(g_dev, orig);
ppa = ftl_ppa_addr_unpack(g_dev, ppa.addr); addr = ftl_addr_addr_unpack(g_dev, addr.addr);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
/* Check maximum valid address for ppaf */ /* Check maximum valid address for addrf */
orig.offset = 0x7fffffff; orig.offset = 0x7fffffff;
orig.zone_id = 0xf; orig.zone_id = 0xf;
orig.pu = 0x7; orig.pu = 0x7;
ppa.addr = ftl_ppa_addr_pack(g_dev, orig); addr.addr = ftl_addr_addr_pack(g_dev, orig);
ppa = ftl_ppa_addr_unpack(g_dev, ppa.addr); addr = ftl_addr_addr_unpack(g_dev, addr.addr);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
clean_l2p(); clean_l2p();
} }
static void static void
test_ppa_trans(void) test_addr_trans(void)
{ {
struct ftl_ppa ppa = {}, orig = {}; struct ftl_addr addr = {}, orig = {};
size_t i; size_t i;
for (i = 0; i < L2P_TABLE_SIZE; ++i) { for (i = 0; i < L2P_TABLE_SIZE; ++i) {
ppa.offset = i % (g_dev->ppaf.lbk_mask + 1); addr.offset = i % (g_dev->ppaf.lbk_mask + 1);
ppa.zone_id = i % (g_dev->ppaf.chk_mask + 1); addr.zone_id = i % (g_dev->ppaf.chk_mask + 1);
ppa.pu = i % (g_dev->ppaf.pu_mask + 1); addr.pu = i % (g_dev->ppaf.pu_mask + 1);
ftl_l2p_set(g_dev, i, ppa); ftl_l2p_set(g_dev, i, addr);
} }
for (i = 0; i < L2P_TABLE_SIZE; ++i) { for (i = 0; i < L2P_TABLE_SIZE; ++i) {
orig.offset = i % (g_dev->ppaf.lbk_mask + 1); orig.offset = i % (g_dev->ppaf.lbk_mask + 1);
orig.zone_id = i % (g_dev->ppaf.chk_mask + 1); orig.zone_id = i % (g_dev->ppaf.chk_mask + 1);
orig.pu = i % (g_dev->ppaf.pu_mask + 1); orig.pu = i % (g_dev->ppaf.pu_mask + 1);
ppa = ftl_l2p_get(g_dev, i); addr = ftl_l2p_get(g_dev, i);
CU_ASSERT_EQUAL(ppa.addr, orig.addr); CU_ASSERT_EQUAL(addr.addr, orig.addr);
} }
clean_l2p(); clean_l2p();
} }
static void static void
test_ppa_invalid(void) test_addr_invalid(void)
{ {
struct ftl_ppa ppa; struct ftl_addr addr;
size_t i; size_t i;
/* Set every other LBA as invalid */ /* Set every other LBA as invalid */
for (i = 0; i < L2P_TABLE_SIZE; i += 2) { for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
ftl_l2p_set(g_dev, i, ftl_to_ppa(FTL_PPA_INVALID)); ftl_l2p_set(g_dev, i, ftl_to_addr(FTL_ADDR_INVALID));
} }
/* Check every even LBA is invalid while others are fine */ /* Check every even LBA is invalid while others are fine */
for (i = 0; i < L2P_TABLE_SIZE; ++i) { for (i = 0; i < L2P_TABLE_SIZE; ++i) {
ppa = ftl_l2p_get(g_dev, i); addr = ftl_l2p_get(g_dev, i);
if (i % 2 == 0) { if (i % 2 == 0) {
CU_ASSERT_TRUE(ftl_ppa_invalid(ppa)); CU_ASSERT_TRUE(ftl_addr_invalid(addr));
} else { } else {
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa)); CU_ASSERT_FALSE(ftl_addr_invalid(addr));
} }
} }
clean_l2p(); clean_l2p();
} }
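The same invalid-marker pattern can clear a whole LBA range; a short sketch, with a helper name made up purely for illustration:

static void
example_invalidate_range(struct spdk_ftl_dev *dev, uint64_t lba, size_t num_lbas)
{
	for (size_t i = 0; i < num_lbas; ++i) {
		/* An invalid entry means the LBA currently maps to no data. */
		ftl_l2p_set(dev, lba + i, ftl_to_addr(FTL_ADDR_INVALID));
	}
}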
static void static void
test_ppa_cached(void) test_addr_cached(void)
{ {
struct ftl_ppa ppa; struct ftl_addr addr;
size_t i; size_t i;
/* Set every other LBA as cached */ /* Set every other LBA as cached */
for (i = 0; i < L2P_TABLE_SIZE; i += 2) { for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
ppa.cached = 1; addr.cached = 1;
ppa.cache_offset = i; addr.cache_offset = i;
ftl_l2p_set(g_dev, i, ppa); ftl_l2p_set(g_dev, i, addr);
} }
/* Check every even LBA is cached while others are not */ /* Check every even LBA is cached while others are not */
for (i = 0; i < L2P_TABLE_SIZE; ++i) { for (i = 0; i < L2P_TABLE_SIZE; ++i) {
ppa = ftl_l2p_get(g_dev, i); addr = ftl_l2p_get(g_dev, i);
if (i % 2 == 0) { if (i % 2 == 0) {
CU_ASSERT_TRUE(ftl_ppa_cached(ppa)); CU_ASSERT_TRUE(ftl_addr_cached(addr));
CU_ASSERT_EQUAL(ppa.offset, i); CU_ASSERT_EQUAL(addr.cache_offset, i);
} else { } else {
CU_ASSERT_FALSE(ftl_ppa_cached(ppa)); CU_ASSERT_FALSE(ftl_addr_cached(addr));
} }
} }
clean_l2p(); clean_l2p();
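Taken together, the invalid and cached tests cover the three kinds of values an L2P entry can hold. A hedged sketch of how a reader of the table might tell them apart; the classification is inferred from the helpers used above and is not spelled out in this diff:

static int
example_classify_l2p_entry(struct spdk_ftl_dev *dev, uint64_t lba)
{
	struct ftl_addr addr = ftl_l2p_get(dev, lba);

	if (ftl_addr_invalid(addr)) {
		return 0; /* LBA is not mapped to any data */
	} else if (ftl_addr_cached(addr)) {
		return 1; /* data still sits in the write buffer at addr.cache_offset */
	} else {
		return 2; /* data is on media: addr.offset within zone addr.zone_id on punit addr.pu */
	}
}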
@ -268,35 +268,35 @@ main(int argc, char **argv)
return CU_get_error(); return CU_get_error();
} }
suite32 = CU_add_suite("ftl_ppa32_suite", setup_l2p_32bit, cleanup); suite32 = CU_add_suite("ftl_addr32_suite", setup_l2p_32bit, cleanup);
if (!suite32) { if (!suite32) {
CU_cleanup_registry(); CU_cleanup_registry();
return CU_get_error(); return CU_get_error();
} }
suite64 = CU_add_suite("ftl_ppa64_suite", setup_l2p_64bit, cleanup); suite64 = CU_add_suite("ftl_addr64_suite", setup_l2p_64bit, cleanup);
if (!suite64) { if (!suite64) {
CU_cleanup_registry(); CU_cleanup_registry();
return CU_get_error(); return CU_get_error();
} }
if ( if (
CU_add_test(suite32, "test_ppa_pack", CU_add_test(suite32, "test_addr_pack",
test_ppa_pack32) == NULL test_addr_pack32) == NULL
|| CU_add_test(suite32, "test_ppa32_invalid", || CU_add_test(suite32, "test_addr32_invalid",
test_ppa_invalid) == NULL test_addr_invalid) == NULL
|| CU_add_test(suite32, "test_ppa32_trans", || CU_add_test(suite32, "test_addr32_trans",
test_ppa_trans) == NULL test_addr_trans) == NULL
|| CU_add_test(suite32, "test_ppa32_cached", || CU_add_test(suite32, "test_addr32_cached",
test_ppa_cached) == NULL test_addr_cached) == NULL
|| CU_add_test(suite64, "test_ppa64_invalid", || CU_add_test(suite64, "test_addr64_invalid",
test_ppa_invalid) == NULL test_addr_invalid) == NULL
|| CU_add_test(suite64, "test_ppa64_trans", || CU_add_test(suite64, "test_addr64_trans",
test_ppa_trans) == NULL test_addr_trans) == NULL
|| CU_add_test(suite64, "test_ppa64_cached", || CU_add_test(suite64, "test_addr64_cached",
test_ppa_cached) == NULL test_addr_cached) == NULL
|| CU_add_test(suite64, "test_ppa64_pack", || CU_add_test(suite64, "test_addr64_pack",
test_ppa_pack64) == NULL test_addr_pack64) == NULL
) { ) {
CU_cleanup_registry(); CU_cleanup_registry();
return CU_get_error(); return CU_get_error();

View File

@ -57,7 +57,7 @@ static struct spdk_ftl_punit_range g_range = {
}; };
DEFINE_STUB(ftl_dev_tail_md_disk_size, size_t, (const struct spdk_ftl_dev *dev), 1); DEFINE_STUB(ftl_dev_tail_md_disk_size, size_t, (const struct spdk_ftl_dev *dev), 1);
DEFINE_STUB(ftl_ppa_is_written, bool, (struct ftl_band *band, struct ftl_ppa ppa), true); DEFINE_STUB(ftl_addr_is_written, bool, (struct ftl_band *band, struct ftl_addr addr), true);
DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state)); DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state));
DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io)); DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_free_io, (struct ftl_io *io)); DEFINE_STUB_V(ftl_free_io, (struct ftl_io *io));
@ -104,25 +104,25 @@ ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
} }
uint64_t uint64_t
ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa) ftl_band_lbkoff_from_addr(struct ftl_band *band, struct ftl_addr addr)
{ {
return test_offset_from_ppa(ppa, band); return test_offset_from_addr(addr, band);
} }
struct ftl_ppa struct ftl_addr
ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff) ftl_band_addr_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
{ {
struct ftl_ppa ppa = { .addr = 0 }; struct ftl_addr addr = { .addr = 0 };
struct spdk_ftl_dev *dev = band->dev; struct spdk_ftl_dev *dev = band->dev;
uint64_t punit; uint64_t punit;
punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin; punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;
ppa.offset = lbkoff % ftl_dev_lbks_in_zone(dev); addr.offset = lbkoff % ftl_dev_lbks_in_zone(dev);
ppa.zone_id = band->id; addr.zone_id = band->id;
ppa.pu = punit; addr.pu = punit;
return ppa; return addr;
} }
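test_offset_from_addr(), called by the ftl_band_lbkoff_from_addr() stub above, is defined elsewhere in this file. As a sketch only, the inverse of ftl_band_addr_from_lbkoff() would look roughly like this:

static uint64_t
example_offset_from_addr(struct ftl_addr addr, struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	/* Undo the punit/zone split performed by ftl_band_addr_from_lbkoff(). */
	uint64_t punit = addr.pu - dev->range.begin;

	return punit * ftl_dev_lbks_in_zone(dev) + addr.offset;
}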
void void
@ -267,7 +267,7 @@ test_reloc_iter_full(void)
struct ftl_reloc *reloc; struct ftl_reloc *reloc;
struct ftl_band_reloc *breloc; struct ftl_band_reloc *breloc;
struct ftl_band *band; struct ftl_band *band;
struct ftl_ppa ppa; struct ftl_addr addr;
setup_reloc(&dev, &reloc, &g_geo, &g_range); setup_reloc(&dev, &reloc, &g_geo, &g_range);
@ -285,7 +285,7 @@ test_reloc_iter_full(void)
(ftl_dev_lbks_in_zone(dev) / reloc->xfer_size); (ftl_dev_lbks_in_zone(dev) / reloc->xfer_size);
for (i = 0; i < num_iters; i++) { for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &ppa); num_lbks = ftl_reloc_next_lbks(breloc, &addr);
CU_ASSERT_EQUAL(num_lbks, reloc->xfer_size); CU_ASSERT_EQUAL(num_lbks, reloc->xfer_size);
} }
@ -297,7 +297,7 @@ test_reloc_iter_full(void)
/* is not divisible by xfer_size */ /* is not divisible by xfer_size */
reminder = ftl_dev_lbks_in_zone(dev) % reloc->xfer_size; reminder = ftl_dev_lbks_in_zone(dev) % reloc->xfer_size;
for (i = 0; i < num_iters; i++) { for (i = 0; i < num_iters; i++) {
num_lbks = ftl_reloc_next_lbks(breloc, &ppa); num_lbks = ftl_reloc_next_lbks(breloc, &addr);
CU_ASSERT_EQUAL(reminder, num_lbks); CU_ASSERT_EQUAL(reminder, num_lbks);
} }
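The loops above rely on ftl_reloc_next_lbks() handing out xfer_size blocks per call until only the division remainder of a zone is left. A sketch of that per-zone contract as the assertions read it, independent of the fixed iteration counts in the test; assert() stands in for the CU_ASSERT macros:

static void
example_reloc_iteration(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc,
			struct ftl_band_reloc *breloc)
{
	struct ftl_addr addr;
	size_t left = ftl_dev_lbks_in_zone(dev);

	/* Full transfers first... */
	while (left >= reloc->xfer_size) {
		assert(ftl_reloc_next_lbks(breloc, &addr) == reloc->xfer_size);
		left -= reloc->xfer_size;
	}
	/* ...then one short transfer for whatever remains in the zone. */
	if (left > 0) {
		assert(ftl_reloc_next_lbks(breloc, &addr) == left);
	}
}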

View File

@ -306,15 +306,15 @@ static void
test_rwb_entry_from_offset(void) test_rwb_entry_from_offset(void)
{ {
struct ftl_rwb_entry *entry; struct ftl_rwb_entry *entry;
struct ftl_ppa ppa = { .cached = 1 }; struct ftl_addr addr = { .cached = 1 };
size_t i; size_t i;
setup_rwb(); setup_rwb();
for (i = 0; i < g_ut.max_allocable_entries; ++i) { for (i = 0; i < g_ut.max_allocable_entries; ++i) {
ppa.offset = i; addr.cache_offset = i;
entry = ftl_rwb_entry_from_offset(g_rwb, i); entry = ftl_rwb_entry_from_offset(g_rwb, i);
CU_ASSERT_EQUAL(ppa.offset, entry->pos); CU_ASSERT_EQUAL(addr.cache_offset, entry->pos);
} }
cleanup_rwb(); cleanup_rwb();
} }
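For reference, the cache_offset/entry->pos relation verified above is what lets a cached ftl_addr be resolved back to its write buffer entry. A minimal sketch, not part of this change:

static struct ftl_rwb_entry *
example_entry_from_cached_addr(struct ftl_rwb *rwb, struct ftl_addr addr)
{
	/* Only cached addresses carry a write buffer offset. */
	assert(ftl_addr_cached(addr));

	return ftl_rwb_entry_from_offset(rwb, addr.cache_offset);
}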

View File

@ -66,7 +66,7 @@ DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, si
size_t num_lbks, int prio, bool defrag)); size_t num_lbks, int prio, bool defrag));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band)); DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io, DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
struct ftl_ppa ppa, size_t ppa_cnt)); struct ftl_addr addr, size_t addr_cnt));
DEFINE_STUB_V(ftl_rwb_get_limits, (struct ftl_rwb *rwb, size_t limit[FTL_RWB_TYPE_MAX])); DEFINE_STUB_V(ftl_rwb_get_limits, (struct ftl_rwb *rwb, size_t limit[FTL_RWB_TYPE_MAX]));
DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status)); DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status));
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free)); DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, const size_t *limits, size_t num_free));
@ -164,7 +164,7 @@ test_wptr(void)
for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_zone(dev) / xfer_size; ++lbk) { for (lbk = 0, offset = 0; lbk < ftl_dev_lbks_in_zone(dev) / xfer_size; ++lbk) {
for (zone = 0; zone < band->num_zones; ++zone) { for (zone = 0; zone < band->num_zones; ++zone) {
CU_ASSERT_EQUAL(wptr->ppa.offset, (lbk * xfer_size)); CU_ASSERT_EQUAL(wptr->addr.offset, (lbk * xfer_size));
CU_ASSERT_EQUAL(wptr->offset, offset); CU_ASSERT_EQUAL(wptr->offset, offset);
ftl_wptr_advance(wptr, xfer_size); ftl_wptr_advance(wptr, xfer_size);
offset += xfer_size; offset += xfer_size;
@ -172,7 +172,7 @@ test_wptr(void)
} }
CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL); CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
CU_ASSERT_EQUAL(wptr->ppa.offset, ftl_dev_lbks_in_zone(dev)); CU_ASSERT_EQUAL(wptr->addr.offset, ftl_dev_lbks_in_zone(dev));
ftl_band_set_state(band, FTL_BAND_STATE_CLOSING); ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);