/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/likely.h"
#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"
#include "spdk/string.h"
#include "spdk/ftl.h"
#include "spdk/crc32.h"

#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_debug.h"
#include "ftl_internal.h"
#include "mngt/ftl_mngt.h"

size_t
spdk_ftl_io_size(void)
{
	return sizeof(struct ftl_io);
}

static size_t
ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
{
	assert(type < SPDK_FTL_LIMIT_MAX);
	return dev->conf.limits[type];
}

/* Check whether shutdown can finish: all in-flight I/O must be drained and
 * the NV cache and L2P must be halted. Kicks off those halts as a side effect.
 */
static bool
ftl_shutdown_complete(struct spdk_ftl_dev *dev)
{
	if (dev->num_inflight) {
		return false;
	}

	if (!ftl_nv_cache_is_halted(&dev->nv_cache)) {
		ftl_nv_cache_halt(&dev->nv_cache);
		return false;
	}

	if (!ftl_nv_cache_chunks_busy(&dev->nv_cache)) {
		return false;
	}

	if (!ftl_l2p_is_halted(dev)) {
		ftl_l2p_halt(dev);
		return false;
	}

	return true;
}

/* Select the most severe limit whose free-band threshold has been crossed */
void
ftl_apply_limits(struct spdk_ftl_dev *dev)
{
	size_t limit;
	int i;

	/* Clear existing limit */
	dev->limit = SPDK_FTL_LIMIT_MAX;

	for (i = SPDK_FTL_LIMIT_CRIT; i < SPDK_FTL_LIMIT_MAX; ++i) {
		limit = ftl_get_limit(dev, i);

		if (dev->num_free <= limit) {
			dev->limit = i;
			break;
		}
	}
}

void
ftl_invalidate_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
{
	struct ftl_band *band;
	struct ftl_p2l_map *p2l_map;

	if (ftl_addr_in_nvc(dev, addr)) {
		return;
	}

	band = ftl_band_from_addr(dev, addr);
	p2l_map = &band->p2l_map;

	/* TODO: fix case when the same address is invalidated from multiple sources */
	assert(p2l_map->num_valid > 0);
	p2l_map->num_valid--;

	/* Invalidate open/full band p2l_map entry to keep p2l and l2p
	 * consistency when band is going to close state
	 */
	if (FTL_BAND_STATE_OPEN == band->md->state || FTL_BAND_STATE_FULL == band->md->state) {
		p2l_map->band_map[ftl_band_block_offset_from_addr(band, addr)] = FTL_LBA_INVALID;
	}
}

void
spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attrs)
{
	attrs->num_blocks = dev->num_lbas;
	attrs->block_size = FTL_BLOCK_SIZE;
	attrs->optimum_io_size = dev->xfer_size;
}

/* Begin processing a user I/O dequeued from a per-channel submission ring */
static void
start_io(struct ftl_io *io)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);
	struct spdk_ftl_dev *dev = io->dev;

	io->map = ftl_mempool_get(ioch->map_pool);
	if (spdk_unlikely(!io->map)) {
		io->status = -ENOMEM;
		ftl_io_complete(io);
		return;
	}

	switch (io->type) {
	case FTL_IO_READ:
		io->status = -EOPNOTSUPP;
		ftl_io_complete(io);
		break;
	case FTL_IO_WRITE:
		TAILQ_INSERT_TAIL(&dev->wr_sq, io, queue_entry);
		break;
	case FTL_IO_UNMAP:
	default:
		io->status = -EOPNOTSUPP;
		ftl_io_complete(io);
	}
}

/* Hand the I/O off to the owning core via the channel's submission ring */
static int
queue_io(struct spdk_ftl_dev *dev, struct ftl_io *io)
{
	size_t result;
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(io->ioch);

	result = spdk_ring_enqueue(ioch->sq, (void **)&io, 1, NULL);
	if (spdk_unlikely(0 == result)) {
		return -EAGAIN;
	}

	return 0;
}

int
spdk_ftl_writev(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_channel *ch,
		uint64_t lba, uint64_t lba_cnt, struct iovec *iov, size_t iov_cnt,
		spdk_ftl_fn cb_fn, void *cb_arg)
{
	int rc;

	if (iov_cnt == 0) {
		return -EINVAL;
	}

	if (lba_cnt == 0) {
		return -EINVAL;
	}

	if (lba_cnt != ftl_iovec_num_blocks(iov, iov_cnt)) {
		FTL_ERRLOG(dev, "Invalid IO vector to handle, device %s, LBA %"PRIu64"\n",
			   dev->conf.name, lba);
		return -EINVAL;
	}

	if (!dev->initialized) {
		return -EBUSY;
	}

	rc = ftl_io_init(ch, io, lba, lba_cnt, iov, iov_cnt, cb_fn, cb_arg, FTL_IO_WRITE);
	if (rc) {
		return rc;
	}

	return queue_io(dev, io);
}
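/*
 * Usage sketch (illustrative only, not compiled as part of this file): a
 * caller allocates an ftl_io of spdk_ftl_io_size() bytes, obtains an I/O
 * channel with spdk_ftl_get_io_channel(), and submits from the thread that
 * owns the channel. The callback and helper names below are hypothetical.
 *
 *	static void
 *	write_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("FTL write failed: %d\n", status);
 *		}
 *	}
 *
 *	static int
 *	submit_one_block(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch,
 *			 struct ftl_io *io, void *buf, uint64_t lba)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = FTL_BLOCK_SIZE };
 *
 *		// -EAGAIN indicates the channel's submission ring was full;
 *		// the caller is expected to retry later.
 *		return spdk_ftl_writev(dev, io, ch, lba, 1, &iov, 1, write_done, NULL);
 *	}
 */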
#define FTL_IO_QUEUE_BATCH 16

/* Per-channel completion poller: drain the completion ring and invoke the
 * user callbacks on the channel's owning thread.
 */
int
ftl_io_channel_poll(void *arg)
{
	struct ftl_io_channel *ch = arg;
	void *ios[FTL_IO_QUEUE_BATCH];
	uint64_t i, count;

	count = spdk_ring_dequeue(ch->cq, ios, FTL_IO_QUEUE_BATCH);
	if (count == 0) {
		return SPDK_POLLER_IDLE;
	}

	for (i = 0; i < count; i++) {
		struct ftl_io *io = ios[i];
		io->user_fn(io->cb_ctx, io->status);
	}

	return SPDK_POLLER_BUSY;
}

static void
ftl_process_io_channel(struct spdk_ftl_dev *dev, struct ftl_io_channel *ioch)
{
	void *ios[FTL_IO_QUEUE_BATCH];
	size_t count, i;

	count = spdk_ring_dequeue(ioch->sq, ios, FTL_IO_QUEUE_BATCH);
	if (count == 0) {
		return;
	}

	for (i = 0; i < count; i++) {
		struct ftl_io *io = ios[i];
		start_io(io);
	}
}

/* Move a queued write into the NV cache (if it has room), then start any
 * newly submitted I/O from each channel's submission ring.
 */
static void
ftl_process_io_queue(struct spdk_ftl_dev *dev)
{
	struct ftl_io_channel *ioch;
	struct ftl_io *io;

	if (!ftl_nv_cache_full(&dev->nv_cache) && !TAILQ_EMPTY(&dev->wr_sq)) {
		io = TAILQ_FIRST(&dev->wr_sq);
		TAILQ_REMOVE(&dev->wr_sq, io, queue_entry);
		assert(io->type == FTL_IO_WRITE);
		if (!ftl_nv_cache_write(io)) {
			TAILQ_INSERT_HEAD(&dev->wr_sq, io, queue_entry);
		}
	}

	TAILQ_FOREACH(ioch, &dev->ioch_queue, entry) {
		ftl_process_io_channel(dev, ioch);
	}
}

/* Main FTL poller: drives I/O submission plus NV cache and L2P background work */
int
ftl_core_poller(void *ctx)
{
	struct spdk_ftl_dev *dev = ctx;
	uint64_t io_activity_total_old = dev->io_activity_total;

	if (dev->halt && ftl_shutdown_complete(dev)) {
		spdk_poller_unregister(&dev->core_poller);
		return SPDK_POLLER_IDLE;
	}

	ftl_process_io_queue(dev);
	ftl_nv_cache_process(dev);
	ftl_l2p_process(dev);

	if (io_activity_total_old != dev->io_activity_total) {
		return SPDK_POLLER_BUSY;
	}

	return SPDK_POLLER_IDLE;
}

/* Shared DMA-capable scratch buffers, allocated once per application */
void *g_ftl_write_buf;
void *g_ftl_read_buf;

int
spdk_ftl_init(void)
{
	g_ftl_write_buf = spdk_zmalloc(FTL_ZERO_BUFFER_SIZE, FTL_ZERO_BUFFER_SIZE, NULL,
				       SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!g_ftl_write_buf) {
		return -ENOMEM;
	}

	g_ftl_read_buf = spdk_zmalloc(FTL_ZERO_BUFFER_SIZE, FTL_ZERO_BUFFER_SIZE, NULL,
				      SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!g_ftl_read_buf) {
		spdk_free(g_ftl_write_buf);
		g_ftl_write_buf = NULL;
		return -ENOMEM;
	}

	return 0;
}

void
spdk_ftl_fini(void)
{
	spdk_free(g_ftl_write_buf);
	spdk_free(g_ftl_read_buf);
}

struct spdk_io_channel *
spdk_ftl_get_io_channel(struct spdk_ftl_dev *dev)
{
	return spdk_get_io_channel(dev);
}

SPDK_LOG_REGISTER_COMPONENT(ftl_core)
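
/*
 * Lifecycle sketch (illustrative only, not compiled as part of this file):
 * spdk_ftl_init() runs once per application, before any FTL device is
 * created, and is paired with spdk_ftl_fini() at shutdown; it only
 * provisions the shared zero buffers above.
 *
 *	if (spdk_ftl_init() != 0) {
 *		// No DMA-capable memory available for the shared buffers.
 *		return -ENOMEM;
 *	}
 *	// ... create devices, submit I/O, tear devices down ...
 *	spdk_ftl_fini();
 */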