lib/ftl: Update lba map during write completion
The LBA map is now updated during write completion rather than at submission time. This change is needed to have a common IO path once append support is added.

Change-Id: I942bfd8b54dc6b40136ca53434f0ef8e3c415c5b
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/471637
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Commit: b48113b296 (parent: d6234332fb)
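The diff below moves the per-entry address assignment and L2P (logical-to-physical) update out of the submission path (ftl_wptr_process_writes) and into the write completion callback (ftl_write_cb). As a rough illustration of that pattern, here is a minimal, self-contained sketch: it is not SPDK code, and all types, names, and helpers are simplified stand-ins. A completion handler walks the batch from the IO's base address and commits a mapping only while the L2P still points at that entry's cache slot.

    /* Minimal sketch of a completion-time LBA map update; simplified stand-in
     * types, not the actual SPDK FTL structures. */
    #include <inttypes.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_LBA UINT64_MAX
    #define L2P_SIZE    1024

    /* A physical address: either a slot in the write-buffer cache or a media offset. */
    struct addr {
    	bool cached;
    	uint64_t offset;
    };

    /* Toy logical-to-physical table (no bounds checking, illustration only). */
    static struct addr l2p[L2P_SIZE];

    /* One buffered write entry, analogous to a write buffer (rwb) entry. */
    struct entry {
    	uint64_t lba;             /* logical address, INVALID_LBA for padding */
    	uint64_t pos;             /* cache slot the entry occupies */
    	struct addr addr;         /* physical address assigned at completion */
    	pthread_spinlock_t lock;
    };

    /* Completion callback: walk the batch starting at the IO's base address and
     * commit each mapping only if the L2P still points at the entry's cache slot. */
    static void write_complete(struct entry *entries, size_t cnt, uint64_t base)
    {
    	for (size_t i = 0; i < cnt; i++) {
    		struct entry *e = &entries[i];

    		e->addr = (struct addr){ .cached = false, .offset = base + i };
    		if (e->lba == INVALID_LBA) {
    			continue; /* padding entry, nothing to map */
    		}

    		pthread_spin_lock(&e->lock);
    		struct addr prev = l2p[e->lba];
    		/* If a newer write re-mapped this LBA in the meantime, skip it. */
    		if (prev.cached && prev.offset == e->pos) {
    			l2p[e->lba] = e->addr;
    		}
    		pthread_spin_unlock(&e->lock);
    	}
    }

    int main(void)
    {
    	struct entry e = { .lba = 7, .pos = 3 };

    	pthread_spin_init(&e.lock, PTHREAD_PROCESS_PRIVATE);
    	l2p[7] = (struct addr){ .cached = true, .offset = 3 }; /* still in cache */

    	write_complete(&e, 1, 100);
    	printf("lba 7 -> media offset %" PRIu64 "\n", l2p[7].offset);
    	return 0;
    }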
@@ -1351,6 +1351,7 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
 	struct ftl_rwb_batch *batch = io->rwb_batch;
 	struct ftl_rwb_entry *entry;
 	struct ftl_band *band;
+	struct ftl_addr prev_addr, addr = io->addr;

 	if (status) {
 		ftl_write_fail(io, status);
@@ -1358,9 +1359,11 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
 	}

 	assert(io->lbk_cnt == dev->xfer_size);
+	assert(!(io->flags & FTL_IO_MD));
+
 	ftl_rwb_foreach(entry, batch) {
 		band = entry->band;
-		if (!(io->flags & FTL_IO_MD) && !(entry->flags & FTL_IO_PAD)) {
+		if (!(entry->flags & FTL_IO_PAD)) {
 			/* Verify that the LBA is set for user lbks */
 			assert(entry->lba != FTL_LBA_INVALID);
 		}
@@ -1370,8 +1373,26 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
 			band->num_reloc_blocks--;
 		}

+		entry->addr = addr;
+		if (entry->lba != FTL_LBA_INVALID) {
+			pthread_spin_lock(&entry->lock);
+			prev_addr = ftl_l2p_get(dev, entry->lba);
+
+			/* If the l2p was updated in the meantime, don't update band's metadata */
+			if (ftl_addr_cached(prev_addr) && prev_addr.cache_offset == entry->pos) {
+				/* Setting entry's cache bit needs to be done after metadata */
+				/* within the band is updated to make sure that writes */
+				/* invalidating the entry clear the metadata as well */
+				ftl_band_set_addr(io->band, entry->lba, entry->addr);
+				ftl_rwb_entry_set_valid(entry);
+			}
+			pthread_spin_unlock(&entry->lock);
+		}
+
 		SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write addr:%lu, lba:%lu\n",
 			      entry->addr.offset, entry->lba);
+
+		addr = ftl_band_next_addr(io->band, addr, 1);
 	}

 	ftl_process_flush(dev, batch);
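Per the comments in the hunk above, the ordering matters: ftl_band_set_addr() updates the band's metadata before ftl_rwb_entry_set_valid() flips the entry's cache bit, so a later write that invalidates the entry is guaranteed to see, and clear, the band metadata as well. The check-and-commit runs under entry->lock, so a concurrent L2P update cannot slip in between ftl_l2p_get() and ftl_band_set_addr().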
@@ -1554,6 +1575,8 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
 	int rc = 0;

 	assert(io->lbk_cnt % dev->xfer_size == 0);
+	/* Only one child write make sense in case of user write */
+	assert((io->flags & FTL_IO_MD) || io->iov_cnt == 1);

 	while (io->iov_pos < io->iov_cnt) {
 		/* There are no guarantees of the order of completion of NVMe IO submission queue */
@@ -1615,7 +1638,6 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 	struct ftl_rwb_batch *batch;
 	struct ftl_rwb_entry *entry;
 	struct ftl_io *io;
-	struct ftl_addr addr, prev_addr;

 	if (spdk_unlikely(!TAILQ_EMPTY(&wptr->pending_queue))) {
 		io = TAILQ_FIRST(&wptr->pending_queue);
@@ -1650,12 +1672,11 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 		return 0;
 	}

-	io = ftl_io_rwb_init(dev, wptr->band, batch, ftl_write_cb);
+	io = ftl_io_rwb_init(dev, wptr->addr, wptr->band, batch, ftl_write_cb);
 	if (!io) {
 		goto error;
 	}

-	addr = wptr->addr;
 	ftl_rwb_foreach(entry, batch) {
 		/* Update band's relocation stats if the IO comes from reloc */
 		if (entry->flags & FTL_IO_WEAK) {
@@ -1665,26 +1686,8 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 			}
 		}

-		entry->addr = addr;
-		if (entry->lba != FTL_LBA_INVALID) {
-			pthread_spin_lock(&entry->lock);
-			prev_addr = ftl_l2p_get(dev, entry->lba);
-
-			/* If the l2p was updated in the meantime, don't update band's metadata */
-			if (ftl_addr_cached(prev_addr) && prev_addr.cache_offset == entry->pos) {
-				/* Setting entry's cache bit needs to be done after metadata */
-				/* within the band is updated to make sure that writes */
-				/* invalidating the entry clear the metadata as well */
-				ftl_band_set_addr(wptr->band, entry->lba, entry->addr);
-				ftl_rwb_entry_set_valid(entry);
-			}
-			pthread_spin_unlock(&entry->lock);
-		}
-
 		ftl_trace_rwb_pop(dev, entry);
 		ftl_update_rwb_stats(dev, entry);
-
-		addr = ftl_band_next_addr(wptr->band, addr, 1);
 	}

 	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write addr:%lx\n", wptr->addr.offset);
@@ -326,9 +326,10 @@ ftl_io_init_internal(const struct ftl_io_init_opts *opts)
 }

 struct ftl_io *
-ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
+ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
 		struct ftl_rwb_batch *batch, ftl_io_fn cb)
 {
+	struct ftl_io *io;
 	struct ftl_io_init_opts opts = {
 		.dev = dev,
 		.io = NULL,
@@ -343,7 +344,14 @@ ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
 		.md = ftl_rwb_batch_get_md(batch),
 	};

-	return ftl_io_init_internal(&opts);
+	io = ftl_io_init_internal(&opts);
+	if (!io) {
+		return NULL;
+	}
+
+	io->addr = addr;
+
+	return io;
 }

 struct ftl_io *
@@ -273,7 +273,8 @@ void ftl_io_advance(struct ftl_io *io, size_t lbk_cnt);
 size_t ftl_iovec_num_lbks(struct iovec *iov, size_t iov_cnt);
 void *ftl_io_iovec_addr(struct ftl_io *io);
 size_t ftl_io_iovec_len_left(struct ftl_io *io);
-struct ftl_io *ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_band *band,
+struct ftl_io *ftl_io_rwb_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
+			       struct ftl_band *band,
 			       struct ftl_rwb_batch *entry, ftl_io_fn cb);
 struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t lbk_cnt, ftl_io_fn cb);
 struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t lbk_cnt,