FTL: Add write path

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I41985617b5879bd3f4bf6d49d2a03eaffdd5ccb5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13322
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Kozlowski Mateusz 2022-06-13 10:55:42 +02:00 committed by Tomasz Zawadzki
parent 4a24a7b3e0
commit e8c5ccf039
6 changed files with 300 additions and 0 deletions

View File

@ -16,6 +16,7 @@ extern "C" {
#endif
struct spdk_ftl_dev;
struct ftl_io;
struct spdk_ftl_conf {
/* Device's name */
@ -146,6 +147,25 @@ void spdk_ftl_conf_deinit(struct spdk_ftl_conf *conf);
*/
void spdk_ftl_get_default_conf(struct spdk_ftl_conf *conf);
/**
* Submits a write to the specified device.
*
* \param dev Device
* \param io Allocated ftl_io
* \param ch I/O channel
* \param lba Starting LBA to write the data
* \param lba_cnt Number of blocks to write
* \param iov Single IO vector or pointer to IO vector table
* \param iov_cnt Number of IO vectors
* \param cb_fn Callback function to invoke when the I/O is completed
* \param cb_arg Argument to pass to the callback function
*
* \return 0 if successfully submitted, negative errno otherwise.
*/
int spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
uint64_t lba, uint64_t lba_cnt,
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg);
/**
* Returns the size of ftl_io struct that needs to be passed to spdk_ftl_read/write
*

View File

@ -71,6 +71,7 @@ static void
start_io(struct ftl_io *io)
{
struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
struct spdk_ftl_dev *dev = io->dev;
io->map = ftl_mempool_get(ioch->map_pool);
if (spdk_unlikely(!io->map)) {
@ -81,7 +82,12 @@ start_io(struct ftl_io *io)
switch (io->type) {
case FTL_IO_READ:
io->status = -EOPNOTSUPP;
ftl_io_complete(io);
break;
case FTL_IO_WRITE:
TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
break;
case FTL_IO_UNMAP:
default:
io->status = -EOPNOTSUPP;
@ -89,6 +95,53 @@ start_io(struct ftl_io *io)
}
}
/* Push an initialized ftl_io onto its I/O channel's submission ring.
 * Returns 0 on success or -EAGAIN when the ring is momentarily full
 * (the caller is expected to retry the submission). */
static int
queue_io(struct spdk_ftl_dev *dev, struct ftl_io *io)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
	size_t enqueued;

	/* NOTE(review): dev is unused here; kept for call-site symmetry. */
	enqueued = spdk_ring_enqueue(ioch->sq, (void **)&io, 1, NULL);

	return spdk_unlikely(enqueued == 0) ? -EAGAIN : 0;
}
/*
 * Validate a user write request, initialize the caller-allocated ftl_io
 * and hand it to the channel's submission queue.
 *
 * Returns 0 on successful submission, -EINVAL for degenerate or
 * inconsistent arguments, -EBUSY when the device is not initialized,
 * or the error from ftl_io_init()/queue_io().
 */
int
spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
		uint64_t lba, uint64_t lba_cnt, struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
		void *cb_arg)
{
	int rc;

	/* Reject empty requests up front. */
	if (iov_cnt == 0 || lba_cnt == 0) {
		return -EINVAL;
	}

	/* The iovec lengths must add up to exactly lba_cnt blocks. */
	if (ftl_iovec_num_blocks(iov, iov_cnt) != lba_cnt) {
		FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
			   dev->conf.name, lba);
		return -EINVAL;
	}

	if (!dev->initialized) {
		return -EBUSY;
	}

	rc = ftl_io_init(ch, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_WRITE);
	if (rc) {
		return rc;
	}

	return queue_io(dev, io);
}
#define FTL_IO_QUEUE_BATCH 16
int
ftl_io_channel_poll(void *arg)
@ -131,6 +184,16 @@ static void
ftl_process_io_queue(struct spdk_ftl_dev *dev)
{
struct ftl_io_channel *ioch;
struct ftl_io *io;
if (!ftl_nv_cache_full(&dev->nv_cache) && !TAILQ_EMPTY(&dev->wr_sq)) {
io = TAILQ_FIRST(&dev->wr_sq);
TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
assert(io->type == FTL_IO_WRITE);
if (!ftl_nv_cache_write(io)) {
TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
}
}
TAILQ_FOREACH(ioch, &dev->ioch_queue, entry) {
ftl_process_io_channel(dev, ioch);

View File

@ -199,6 +199,66 @@ chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
/*
 * Reserve io->num_blocks of write-buffer space in the NV cache.
 *
 * Returns the starting cache offset of the reservation, or FTL_LBA_INVALID
 * when no open chunk can currently hold the request (caller retries later).
 *
 * On success, sets io->nv_cache_chunk and advances the chunk's write
 * pointer. A current chunk too small for the whole request has its leftover
 * blocks accounted as skipped/written and may be closed; the request then
 * moves on to a fresh chunk from the open list.
 */
static uint64_t
ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
{
	uint64_t address = FTL_LBA_INVALID;
	uint64_t num_blocks = io->num_blocks;
	uint64_t free_space;
	struct ftl_nv_cache_chunk *chunk;

	do {
		chunk = nv_cache->chunk_current;
		/* Chunk has been closed so pick new one */
		if (chunk && chunk_is_closed(chunk)) {
			chunk = NULL;
		}

		if (!chunk) {
			/* Take the next chunk from the open list, but only if it is
			 * fully open - otherwise give up and return FTL_LBA_INVALID. */
			chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
			if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
				TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
				nv_cache->chunk_current = chunk;
			} else {
				break;
			}
		}

		free_space = chunk_get_free_space(nv_cache, chunk);

		if (free_space >= num_blocks) {
			/* Enough space in chunk */

			/* Calculate address in NV cache */
			address = chunk->offset + chunk->md->write_pointer;

			/* Set chunk in IO */
			io->nv_cache_chunk = chunk;

			/* Move write pointer */
			chunk->md->write_pointer += num_blocks;
			break;
		}

		/* Not enough space in nv_cache_chunk */
		nv_cache->chunk_current = NULL;

		if (0 == free_space) {
			/* Chunk is already full - just pick a new one next iteration. */
			continue;
		}

		/* Pad out the chunk: remaining blocks are skipped, not written with data. */
		chunk->md->blocks_skipped = free_space;
		chunk->md->blocks_written += free_space;
		chunk->md->write_pointer += free_space;

		if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
			ftl_chunk_close(chunk);
		}
	} while (1);

	return address;
}
void
ftl_nv_cache_fill_md(struct ftl_io *io)
{
@ -257,6 +317,130 @@ ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
p2l_map->chunk_dma_md = NULL;
}
/* Final completion step for an NV cache write path: account the I/O's
 * blocks on its chunk, release the metadata buffer and complete the
 * user-visible ftl_io. */
static void
ftl_nv_cache_submit_cb_done(struct ftl_io *io)
{
	struct ftl_nv_cache *cache = &io->dev->nv_cache;

	chunk_advance_blocks(cache, io->nv_cache_chunk, io->num_blocks);
	io->nv_cache_chunk = NULL;

	ftl_mempool_put(cache->md_pool, io->md);
	ftl_io_complete(io);
}
/* After a successful cache write, point each LBA's L2P entry at its new
 * cache address (blocks were written contiguously from io->addr), drop
 * the L2P pin and finish the I/O. */
static void
ftl_nv_cache_l2p_update(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	size_t blk;

	for (blk = 0; blk < io->num_blocks; blk++) {
		ftl_l2p_update_cache(dev, ftl_io_get_lba(io, blk), io->addr + blk, io->map[blk]);
	}

	ftl_l2p_unpin(dev, io->lba, io->num_blocks);
	ftl_nv_cache_submit_cb_done(io);
}
/* bdev completion callback for the NV cache write itself. On success the
 * L2P is updated and the I/O completes from there; on failure the I/O is
 * failed with -EIO. The bdev_io is freed in both cases. */
static void
ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_io *io = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (success) {
		ftl_nv_cache_l2p_update(io);
		return;
	}

	FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
		   io->addr);
	io->status = -EIO;
	ftl_nv_cache_submit_cb_done(io);
}
/* Issue the actual bdev write for an NV cache I/O. On -ENOMEM (bdev_io
 * pool exhausted) the request is parked on the bdev's io_wait queue and
 * this function is re-invoked when a bdev_io frees up; any other
 * submission error is treated as fatal. */
static void
nv_cache_write(void *_io)
{
	struct ftl_io *io = _io;
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	int rc;

	rc = ftl_nv_cache_bdev_writev_blocks_with_md(dev,
			nv_cache->bdev_desc, nv_cache->cache_ioch,
			io->iov, io->iov_cnt, io->md,
			ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
			ftl_nv_cache_submit_cb, io);
	if (spdk_unlikely(rc != 0)) {
		if (rc != -ENOMEM) {
			ftl_abort();
			return;
		}

		/* Out of bdev_io objects: retry once one becomes available. */
		io->bdev_io_wait.bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
		io->bdev_io_wait.cb_fn = nv_cache_write;
		io->bdev_io_wait.cb_arg = io;
		spdk_bdev_queue_io_wait(io->bdev_io_wait.bdev, nv_cache->cache_ioch,
					&io->bdev_io_wait);
	}
}
/*
 * Completion callback for pinning the L2P range of a cache write.
 *
 * On success: snapshot the previous L2P mapping of every LBA into io->map
 * (needed later to resolve outstanding write-after-write conflicts), then
 * submit the bdev write. On pin failure: fail the I/O with -EAGAIN so the
 * caller can retry on the internal L2P fault.
 */
static void
ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
{
	struct ftl_io *io = pin_ctx->cb_ctx;
	size_t i;

	if (spdk_unlikely(status != 0)) {
		/* Retry on the internal L2P fault */
		/* Fixed garbled log message ("Cannot PIN LBA ... write failed at") */
		FTL_ERRLOG(dev, "L2P pin failed for NV cache write, addr %"PRIx64"\n",
			   io->addr);
		io->status = -EAGAIN;
		ftl_nv_cache_submit_cb_done(io);
		return;
	}

	/* Remember previous l2p mapping to resolve conflicts in case of outstanding write-after-write */
	for (i = 0; i < io->num_blocks; ++i) {
		io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
	}

	assert(io->iov_pos == 0);
	nv_cache_write(io);
}
/*
 * Entry point for writing an ftl_io into the NV cache.
 *
 * Returns false when resources are momentarily unavailable (metadata pool
 * empty or no cache space) - the caller is expected to requeue and retry.
 * Returns true once the write has been accepted; completion happens
 * asynchronously via the L2P pin callback.
 */
bool
ftl_nv_cache_write(struct ftl_io *io)
{
	struct spdk_ftl_dev *dev = io->dev;
	uint64_t cache_offset;

	io->md = ftl_mempool_get(dev->nv_cache.md_pool);
	if (spdk_unlikely(!io->md)) {
		return false;
	}

	/* Reserve area on the write buffer cache */
	cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
	if (cache_offset == FTL_LBA_INVALID) {
		/* No free space in NV cache, resubmit request */
		ftl_mempool_put(dev->nv_cache.md_pool, io->md);
		return false;
	}

	io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
	/* NOTE(review): ftl_nv_cache_get_wr_buffer() already set io->nv_cache_chunk
	 * (equal to chunk_current on success), so this assignment looks redundant -
	 * confirm before removing. */
	io->nv_cache_chunk = dev->nv_cache.chunk_current;

	ftl_nv_cache_fill_md(io);
	ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
		    ftl_nv_cache_pin_cb, io,
		    &io->l2p_pin_ctx);

	return true;
}
int
ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg)

View File

@ -131,6 +131,7 @@ struct ftl_nv_cache {
int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
bool ftl_nv_cache_write(struct ftl_io *io);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg);

View File

@ -13,6 +13,7 @@
spdk_ftl_conf_deinit;
spdk_ftl_get_io_channel;
spdk_ftl_io_size;
spdk_ftl_writev;
local: *;
};

View File

@ -84,12 +84,43 @@ bdev_ftl_destruct(void *ctx)
return 1;
}
/* Completion trampoline passed to spdk_ftl_writev(): translate the FTL
 * return code into a bdev I/O status and complete the bdev_io. */
static void
bdev_ftl_cb(void *arg, int rc)
{
	struct spdk_bdev_io *bdev_io = arg;
	enum spdk_bdev_io_status status;

	if (rc == 0) {
		status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (rc == -ENOMEM) {
		status = SPDK_BDEV_IO_STATUS_NOMEM;
	} else {
		status = SPDK_BDEV_IO_STATUS_FAILED;
	}

	spdk_bdev_io_complete(bdev_io, status);
}
static int
_bdev_ftl_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct ftl_bdev *ftl_bdev = (struct ftl_bdev *)bdev_io->bdev->ctxt;
switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ:
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
case SPDK_BDEV_IO_TYPE_WRITE:
return spdk_ftl_writev(ftl_bdev->dev, (struct ftl_io *)bdev_io->driver_ctx,
ch, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, bdev_ftl_cb, bdev_io);
case SPDK_BDEV_IO_TYPE_UNMAP:
case SPDK_BDEV_IO_TYPE_FLUSH:
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);