ftl: remove deprecated ftl library

Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Change-Id: I3ebb05be3f1b9864b238cb74f469b4fdf573cd0d
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11120
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
This commit is contained in:
Wojciech Malikowski 2022-01-17 11:41:16 +01:00 committed by Tomasz Zawadzki
parent f656eed6e0
commit 81dca28884
54 changed files with 2 additions and 13930 deletions

View File

@ -305,10 +305,6 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
run_test "ocf" ./test/ocf/ocf.sh
fi
if [ $SPDK_TEST_FTL -eq 1 ]; then
run_test "ftl" ./test/ftl/ftl.sh
fi
if [ $SPDK_TEST_VMD -eq 1 ]; then
run_test "vmd" ./test/vmd/vmd.sh
fi

View File

@ -1,223 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef SPDK_FTL_H
#define SPDK_FTL_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/bdev.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Opaque handle to an FTL device instance. */
struct spdk_ftl_dev;

/* Limit thresholds */
enum {
	SPDK_FTL_LIMIT_CRIT,
	SPDK_FTL_LIMIT_HIGH,
	SPDK_FTL_LIMIT_LOW,
	SPDK_FTL_LIMIT_START,
	SPDK_FTL_LIMIT_MAX
};

/* Single user-write limit entry (see spdk_ftl_conf.limits). */
struct spdk_ftl_limit {
	/* Threshold from which the limiting starts */
	size_t thld;

	/* Limit percentage */
	size_t limit;
};

/* Device configuration; fill via spdk_ftl_conf_init_defaults() and override. */
struct spdk_ftl_conf {
	/* Number of reserved addresses not exposed to the user */
	size_t lba_rsvd;

	/* Size of the per-io_channel write buffer */
	size_t write_buffer_size;

	/* Threshold for opening new band */
	size_t band_thld;

	/* Maximum IO depth per band relocate */
	size_t max_reloc_qdepth;

	/* Maximum active band relocates */
	size_t max_active_relocs;

	/* IO pool size per user thread */
	size_t user_io_pool_size;

	/* Lowest percentage of invalid blocks for a band to be defragged */
	size_t invalid_thld;

	/* User writes limits */
	struct spdk_ftl_limit limits[SPDK_FTL_LIMIT_MAX];

	/* Allow for partial recovery from open bands instead of returning error */
	bool allow_open_bands;

	/* Use append instead of write */
	bool use_append;

	/* Maximum supported number of IO channels */
	uint32_t max_io_channels;

	struct {
		/* Maximum number of concurrent requests */
		size_t max_request_cnt;
		/* Maximum number of blocks per one request */
		size_t max_request_size;
	} nv_cache;

	/* Create l2p table on l2p_path persistent memory file or device instead of in DRAM */
	const char *l2p_path;
};

enum spdk_ftl_mode {
	/* Create new device */
	SPDK_FTL_MODE_CREATE = (1 << 0),
};

/* Parameters for spdk_ftl_dev_init(). */
struct spdk_ftl_dev_init_opts {
	/* Underlying device */
	const char *base_bdev;
	/* Write buffer cache */
	const char *cache_bdev;

	/* Thread responsible for core tasks execution */
	struct spdk_thread *core_thread;

	/* Device's config */
	const struct spdk_ftl_conf *conf;
	/* Device's name */
	const char *name;
	/* Mode flags */
	unsigned int mode;
	/* Device UUID (valid when restoring device from disk) */
	struct spdk_uuid uuid;
};

/* Read-only attributes filled in by spdk_ftl_dev_get_attrs(). */
struct spdk_ftl_attrs {
	/* Device's UUID */
	struct spdk_uuid uuid;
	/* Number of logical blocks */
	uint64_t num_blocks;
	/* Logical block size */
	size_t block_size;
	/* Underlying device */
	const char *base_bdev;
	/* Write buffer cache */
	const char *cache_bdev;
	/* Number of zones per parallel unit in the underlying device (including any offline ones) */
	size_t num_zones;
	/* Number of logical blocks per zone */
	size_t zone_size;
	/* Device specific configuration */
	struct spdk_ftl_conf conf;
};

/* Generic completion callback: (cb_arg, status).
 * NOTE(review): status appears to be 0 on success / negative errno on failure,
 * mirroring the submit-path return convention documented below -- confirm. */
typedef void (*spdk_ftl_fn)(void *, int);
/* Init/free completion callback: (device, cb_arg, status). */
typedef void (*spdk_ftl_init_fn)(struct spdk_ftl_dev *, void *, int);

/**
 * Initialize the FTL on given NVMe device and parallel unit range.
 *
 * Covers the following:
 * - retrieve zone device information,
 * - allocate buffers and resources,
 * - initialize internal structures,
 * - initialize internal thread(s),
 * - restore or create L2P table.
 *
 * \param opts configuration for new device
 * \param cb callback function to call when the device is created
 * \param cb_arg callback's argument
 *
 * \return 0 if initialization was started successfully, negative errno otherwise.
 */
int spdk_ftl_dev_init(const struct spdk_ftl_dev_init_opts *opts, spdk_ftl_init_fn cb, void *cb_arg);

/**
 * Deinitialize and free given device.
 *
 * \param dev device
 * \param cb callback function to call when the device is freed
 * \param cb_arg callback's argument
 *
 * \return 0 if successfully scheduled free, negative errno otherwise.
 */
int spdk_ftl_dev_free(struct spdk_ftl_dev *dev, spdk_ftl_init_fn cb, void *cb_arg);

/**
 * Initialize FTL configuration structure with default values.
 *
 * \param conf FTL configuration to initialize
 */
void spdk_ftl_conf_init_defaults(struct spdk_ftl_conf *conf);

/**
 * Retrieve devices attributes.
 *
 * \param dev device
 * \param attr Attribute structure to fill
 */
void spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *attr);

/**
 * Submits a read to the specified device.
 *
 * \param dev Device
 * \param ch I/O channel
 * \param lba Starting LBA to read the data
 * \param lba_cnt Number of sectors to read
 * \param iov Single IO vector or pointer to IO vector table
 * \param iov_cnt Number of IO vectors
 * \param cb_fn Callback function to invoke when the I/O is completed
 * \param cb_arg Argument to pass to the callback function
 *
 * \return 0 if successfully submitted, negative errno otherwise.
 */
int spdk_ftl_read(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lba,
		  size_t lba_cnt,
		  struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg);

/**
 * Submits a write to the specified device.
 *
 * \param dev Device
 * \param ch I/O channel
 * \param lba Starting LBA to write the data
 * \param lba_cnt Number of sectors to write
 * \param iov Single IO vector or pointer to IO vector table
 * \param iov_cnt Number of IO vectors
 * \param cb_fn Callback function to invoke when the I/O is completed
 * \param cb_arg Argument to pass to the callback function
 *
 * \return 0 if successfully submitted, negative errno otherwise.
 */
int spdk_ftl_write(struct spdk_ftl_dev *dev, struct spdk_io_channel *ch, uint64_t lba,
		   size_t lba_cnt,
		   struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_arg);

/**
 * Submits a flush request to the specified device.
 *
 * \param dev device
 * \param cb_fn Callback function to invoke when all prior IOs have been completed
 * \param cb_arg Argument to pass to the callback function
 *
 * \return 0 if successfully submitted, negative errno otherwise.
 */
int spdk_ftl_flush(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);

#ifdef __cplusplus
}
#endif

#endif /* SPDK_FTL_H */

View File

@ -11,7 +11,7 @@ DIRS-y += bdev blob blobfs conf dma accel event json jsonrpc \
log lvol rpc sock thread trace util nvme vmd nvmf scsi \
ioat ut_mock iscsi notify init trace_parser
ifeq ($(OS),Linux)
DIRS-y += nbd ftl
DIRS-y += nbd
endif
DIRS-$(CONFIG_OCF) += env_ocf

View File

@ -1,48 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_ADDR_H
#define FTL_ADDR_H

#include "spdk/stdinc.h"

/* Marks address as invalid */
#define FTL_ADDR_INVALID	(-1)
/* Marks LBA as invalid */
#define FTL_LBA_INVALID		((uint64_t)-1)
/* Smallest data unit size */
#define FTL_BLOCK_SIZE		4096

/* This structure represents on-disk address. It can have one of the following */
/* formats: */
/*        - offset inside the disk */
/*        - cache_offset inside the cache (indicated by the cached flag) */
/*        - packed version of the two formats above (can be only used when the */
/*          offset can be represented in less than 32 bits) */
/* Packed format is used, when possible, to avoid wasting RAM on the L2P table. */
/* NOTE: callers read the anonymous-union members interchangeably; this relies */
/* on union member punning, which C permits. */
struct ftl_addr {
	union {
		struct {
			uint64_t cache_offset : 63;
			uint64_t cached       : 1;
		};

		struct {
			union {
				struct {
					uint32_t cache_offset : 31;
					uint32_t cached       : 1;
				};
				uint32_t offset;
			};
			uint32_t rsvd;
		} pack;

		uint64_t offset;
	};
};

#endif /* FTL_ADDR_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,259 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_BAND_H
#define FTL_BAND_H

#include "spdk/stdinc.h"
#include "spdk/bit_array.h"
#include "spdk/queue.h"
#include "spdk/bdev_zone.h"

#include "ftl_io.h"
#include "ftl_addr.h"
#include "ftl_core.h"

/* Number of LBAs that could be stored in a single block */
#define FTL_NUM_LBA_IN_BLOCK	(FTL_BLOCK_SIZE / sizeof(uint64_t))

struct spdk_ftl_dev;
struct ftl_lba_map_request;

/* Single zone belonging to a band. */
struct ftl_zone {
	struct spdk_bdev_zone_info	info;

	/* Indicates that there is inflight write */
	bool				busy;

	CIRCLEQ_ENTRY(ftl_zone)		circleq;
};

/* Result of reading band metadata from disk. */
enum ftl_md_status {
	FTL_MD_SUCCESS,
	/* Metadata read failure */
	FTL_MD_IO_FAILURE,
	/* Invalid version */
	FTL_MD_INVALID_VER,
	/* UUID doesn't match */
	FTL_MD_NO_MD,
	/* UUID and version matches but CRC doesn't */
	FTL_MD_INVALID_CRC,
	/* Vld or lba map size doesn't match */
	FTL_MD_INVALID_SIZE
};

/* Per-segment residency state of the in-memory LBA map. */
enum ftl_lba_map_seg_state {
	FTL_LBA_MAP_SEG_CLEAR,
	FTL_LBA_MAP_SEG_PENDING,
	FTL_LBA_MAP_SEG_CACHED
};

struct ftl_lba_map {
	/* LBA/vld map lock */
	pthread_spinlock_t		lock;

	/* Number of valid LBAs */
	size_t				num_vld;

	/* LBA map's reference count */
	size_t				ref_cnt;

	/* Bitmap of valid LBAs */
	struct spdk_bit_array		*vld;

	/* LBA map (only valid for open/relocating bands) */
	uint64_t			*map;

	/* LBA map segment state map (clear, pending, cached) */
	uint8_t				*segments;

	LIST_HEAD(, ftl_lba_map_request) request_list;

	/* Metadata DMA buffer (only valid for open/relocating bands) */
	void				*dma_buf;
};

enum ftl_band_state {
	FTL_BAND_STATE_FREE,
	FTL_BAND_STATE_PREP,
	FTL_BAND_STATE_OPENING,
	FTL_BAND_STATE_OPEN,
	FTL_BAND_STATE_FULL,
	FTL_BAND_STATE_CLOSING,
	FTL_BAND_STATE_CLOSED,
	FTL_BAND_STATE_MAX
};

/* Outstanding request to read (part of) a band's LBA map. */
struct ftl_lba_map_request {
	/* Completion callback */
	ftl_io_fn			cb;

	/* Completion callback context */
	void				*cb_ctx;

	/* Bit array of requested segments */
	struct spdk_bit_array		*segments;

	/* Number of pending segments to read */
	size_t				num_pending;

	LIST_ENTRY(ftl_lba_map_request)	list_entry;
};

struct ftl_band {
	/* Device this band belongs to */
	struct spdk_ftl_dev		*dev;

	/* Number of operational zones */
	size_t				num_zones;

	/* Array of zones */
	struct ftl_zone			*zone_buf;

	/* List of operational zones */
	CIRCLEQ_HEAD(, ftl_zone)	zones;

	/* LBA map */
	struct ftl_lba_map		lba_map;

	/* Band's state */
	enum ftl_band_state		state;

	/* Band's index */
	unsigned int			id;

	/* Latest merit calculation */
	double				merit;

	/* High defrag priority - means that the metadata should be copied and */
	/* the band should be defragged immediately */
	int				high_prio;

	/* Sequence number */
	uint64_t			seq;

	/* Number of defrag cycles */
	uint64_t			wr_cnt;

	/* End metadata start addr */
	struct ftl_addr			tail_md_addr;

	/* Bitmap of all bands that have its data moved onto this band */
	struct spdk_bit_array		*reloc_bitmap;
	/* Number of open bands containing data moved from this band */
	size_t				num_reloc_bands;
	/* Number of blocks currently being moved from this band */
	size_t				num_reloc_blocks;

	/* Free/shut bands' lists */
	LIST_ENTRY(ftl_band)		list_entry;

	/* High priority queue link */
	STAILQ_ENTRY(ftl_band)		prio_stailq;
};

/* Band helpers implemented in ftl_band.c. */
uint64_t	ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr);
struct ftl_addr ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off);
void		ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state);
size_t		ftl_band_age(const struct ftl_band *band);
void		ftl_band_acquire_lba_map(struct ftl_band *band);
int		ftl_band_alloc_lba_map(struct ftl_band *band);
void		ftl_band_clear_lba_map(struct ftl_band *band);
void		ftl_band_release_lba_map(struct ftl_band *band);
int		ftl_band_read_lba_map(struct ftl_band *band,
				      size_t offset, size_t lba_cnt,
				      ftl_io_fn cb_fn, void *cb_ctx);
struct ftl_addr ftl_band_next_xfer_addr(struct ftl_band *band, struct ftl_addr addr,
					size_t num_blocks);
struct ftl_addr ftl_band_next_addr(struct ftl_band *band, struct ftl_addr addr,
				   size_t offset);
size_t		ftl_band_num_usable_blocks(const struct ftl_band *band);
size_t		ftl_band_user_blocks_left(const struct ftl_band *band, size_t offset);
size_t		ftl_band_user_blocks(const struct ftl_band *band);
void		ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
				  struct ftl_addr addr);
struct ftl_band *ftl_band_from_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
struct ftl_zone *ftl_band_zone_from_addr(struct ftl_band *band, struct ftl_addr);
void		ftl_band_md_clear(struct ftl_band *band);
int		ftl_band_read_tail_md(struct ftl_band *band, struct ftl_addr,
				      ftl_io_fn cb_fn, void *cb_ctx);
int		ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx);
int		ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb);
int		ftl_band_write_head_md(struct ftl_band *band, ftl_io_fn cb);
struct ftl_addr ftl_band_tail_md_addr(struct ftl_band *band);
struct ftl_addr ftl_band_head_md_addr(struct ftl_band *band);
void		ftl_band_write_failed(struct ftl_band *band);
int		ftl_band_full(struct ftl_band *band, size_t offset);
int		ftl_band_write_prep(struct ftl_band *band);
struct ftl_zone *ftl_band_next_operational_zone(struct ftl_band *band,
		struct ftl_zone *zone);
size_t		ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);
void		ftl_band_remove_zone(struct ftl_band *band, struct ftl_zone *zone);

/* True when the band holds no valid blocks. */
static inline int
ftl_band_empty(const struct ftl_band *band)
{
	return band->lba_map.num_vld == 0;
}

/* Next zone after the given one, wrapping around the band's zone list. */
static inline struct ftl_zone *
ftl_band_next_zone(struct ftl_band *band, struct ftl_zone *zone)
{
	assert(zone->info.state != SPDK_BDEV_ZONE_STATE_OFFLINE);
	return CIRCLEQ_LOOP_NEXT(&band->zones, zone, circleq);
}

/* Advance the band to the next state in enum ftl_band_state order. */
static inline void
ftl_band_set_next_state(struct ftl_band *band)
{
	ftl_band_set_state(band, (band->state + 1) % FTL_BAND_STATE_MAX);
}

/* True while the band is mid-transition (opening or closing). */
static inline int
ftl_band_state_changing(struct ftl_band *band)
{
	return band->state == FTL_BAND_STATE_OPENING ||
	       band->state == FTL_BAND_STATE_CLOSING;
}

/* True when the block at block_off holds valid data; checked under the map lock. */
static inline int
ftl_band_block_offset_valid(struct ftl_band *band, size_t block_off)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	pthread_spin_lock(&lba_map->lock);
	if (spdk_bit_array_get(lba_map->vld, block_off)) {
		pthread_spin_unlock(&lba_map->lock);
		return 1;
	}

	pthread_spin_unlock(&lba_map->lock);
	return 0;
}

static inline int
ftl_band_zone_is_last(struct ftl_band *band, struct ftl_zone *zone)
{
	return zone == CIRCLEQ_LAST(&band->zones);
}

static inline int
ftl_band_zone_is_first(struct ftl_band *band, struct ftl_zone *zone)
{
	return zone == CIRCLEQ_FIRST(&band->zones);
}

/* A zone accepts writes when open or empty; without append support an
 * inflight write (zone->busy) also blocks it. */
static inline int
ftl_zone_is_writable(const struct spdk_ftl_dev *dev, const struct ftl_zone *zone)
{
	bool busy = ftl_is_append_supported(dev) ? false : zone->busy;

	return (zone->info.state == SPDK_BDEV_ZONE_STATE_OPEN ||
		zone->info.state == SPDK_BDEV_ZONE_STATE_EMPTY) &&
	       !busy;
}

#endif /* FTL_BAND_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,524 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/ftl.h"
#include "spdk/bdev.h"
#include "spdk/bdev_zone.h"

#include "ftl_addr.h"
#include "ftl_io.h"
#include "ftl_trace.h"

#ifdef SPDK_CONFIG_PMDK
#include "libpmem.h"
#endif /* SPDK_CONFIG_PMDK */

struct spdk_ftl_dev;
struct ftl_band;
struct ftl_zone;
struct ftl_io;
struct ftl_restore;
struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;
struct ftl_band_flush;

/* Device-wide counters plus the trace buffer. */
struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t			write_user;

	/* Total number of writes */
	uint64_t			write_total;

	/* Traces */
	struct ftl_trace		trace;

	/* Number of limits applied */
	uint64_t			limits[SPDK_FTL_LIMIT_MAX];
};

/* Device-level metadata. */
struct ftl_global_md {
	/* Device instance */
	struct spdk_uuid		uuid;
	/* Size of the l2p table */
	uint64_t			num_lbas;
};

struct ftl_nv_cache {
	/* Write buffer cache bdev */
	struct spdk_bdev_desc		*bdev_desc;
	/* Write pointer */
	uint64_t			current_addr;
	/* Number of available blocks left */
	uint64_t			num_available;
	/* Maximum number of blocks */
	uint64_t			num_data_blocks;
	/*
	 * Phase of the current cycle of writes. Each time whole cache area is filled, the phase is
	 * advanced. Current phase is saved in every IO's metadata, as well as in the header saved
	 * in the first sector. By looking at the phase of each block, it's possible to find the
	 * oldest block and replay the order of the writes when recovering the data from the cache.
	 */
	unsigned int			phase;
	/* Indicates that the data can be written to the cache */
	bool				ready;
	/* Metadata pool */
	struct spdk_mempool		*md_pool;
	/* DMA buffer for writing the header */
	void				*dma_buf;
	/* Cache lock */
	pthread_spinlock_t		lock;
};

struct ftl_batch {
	/* Queue of write buffer entries, can reach up to xfer_size entries */
	TAILQ_HEAD(, ftl_wbuf_entry)	entries;
	/* Number of entries in the queue above */
	uint32_t			num_entries;
	/* Index within spdk_ftl_dev.batch_array */
	uint32_t			index;
	struct iovec			*iov;
	void				*metadata;
	TAILQ_ENTRY(ftl_batch)		tailq;
};

struct spdk_ftl_dev {
	/* Device instance */
	struct spdk_uuid		uuid;
	/* Device name */
	char				*name;
	/* Configuration */
	struct spdk_ftl_conf		conf;

	/* Indicates the device is fully initialized */
	int				initialized;
	/* Indicates the device is about to be stopped */
	int				halt;
	/* Indicates the device is about to start stopping - use to handle multiple stop request */
	bool				halt_started;

	/* Underlying device */
	struct spdk_bdev_desc		*base_bdev_desc;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache		nv_cache;

	/* LBA map memory pool */
	struct spdk_mempool		*lba_pool;

	/* LBA map requests pool */
	struct spdk_mempool		*lba_request_pool;

	/* Media management events pool */
	struct spdk_mempool		*media_events_pool;

	/* Statistics */
	struct ftl_stats		stats;

	/* Current sequence number */
	uint64_t			seq;

	/* Array of bands */
	struct ftl_band			*bands;
	/* Number of operational bands */
	size_t				num_bands;
	/* Next write band */
	struct ftl_band			*next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band)		free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band)		shut_bands;
	/* Number of free bands */
	size_t				num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr)		wptr_list;

	/* Logical -> physical table */
	void				*l2p;
	/* Size of the l2p table */
	uint64_t			num_lbas;
	/* Size of pages mmapped for l2p, valid only for mapping on persistent memory */
	size_t				l2p_pmem_len;

	/* Address size */
	size_t				addr_len;

	/* Flush list */
	LIST_HEAD(, ftl_flush)		flush_list;
	/* List of band flush requests */
	LIST_HEAD(, ftl_band_flush)	band_flush_list;

	/* Device specific md buffer */
	struct ftl_global_md		global_md;

	/* Metadata size */
	size_t				md_size;
	void				*md_buf;

	/* Transfer unit size */
	size_t				xfer_size;

	/* Current user write limit */
	int				limit;

	/* Inflight IO operations */
	uint32_t			num_inflight;

	/* Manages data relocation */
	struct ftl_reloc		*reloc;

	/* Thread on which the poller is running */
	struct spdk_thread		*core_thread;
	/* IO channel */
	struct spdk_io_channel		*ioch;
	/* Poller */
	struct spdk_poller		*core_poller;

	/* IO channel array provides means for retrieving write buffer entries
	 * from their address stored in L2P. The address is divided into two
	 * parts - IO channel offset pointing at specific IO channel (within this
	 * array) and entry offset pointing at specific entry within that IO
	 * channel.
	 */
	struct ftl_io_channel		**ioch_array;
	TAILQ_HEAD(, ftl_io_channel)	ioch_queue;
	uint64_t			num_io_channels;
	/* Value required to shift address of a write buffer entry to retrieve
	 * the IO channel it's part of. The other part of the address describes
	 * the offset of an entry within the IO channel's entry array.
	 */
	uint64_t			ioch_shift;

	/* Write buffer batches */
#define FTL_BATCH_COUNT 4096
	struct ftl_batch		batch_array[FTL_BATCH_COUNT];
	/* Iovec buffer used by batches */
	struct iovec			*iov_buf;
	/* Batch currently being filled */
	struct ftl_batch		*current_batch;
	/* Full and ready to be sent batches. A batch is put on this queue in
	 * case it's already filled, but cannot be sent.
	 */
	TAILQ_HEAD(, ftl_batch)		pending_batches;
	TAILQ_HEAD(, ftl_batch)		free_batches;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev)	stailq;
};

/* On-disk header describing the non-volatile cache. */
struct ftl_nv_cache_header {
	/* Version of the header */
	uint32_t			version;
	/* UUID of the FTL device */
	struct spdk_uuid		uuid;
	/* Size of the non-volatile cache (in blocks) */
	uint64_t			size;
	/* Contains the next address to be written after clean shutdown, invalid LBA otherwise */
	uint64_t			current_addr;
	/* Current phase */
	uint8_t				phase;
	/* Checksum of the header, needs to be last element */
	uint32_t			checksum;
} __attribute__((packed));

struct ftl_media_event {
	/* Owner */
	struct spdk_ftl_dev		*dev;
	/* Media event */
	struct spdk_bdev_media_event	event;
};

typedef void (*ftl_restore_fn)(struct ftl_restore *, int, void *cb_arg);

/* Core FTL entry points implemented across ftl_core.c, ftl_restore.c etc. */
void	ftl_apply_limits(struct spdk_ftl_dev *dev);
void	ftl_io_read(struct ftl_io *io);
void	ftl_io_write(struct ftl_io *io);
int	ftl_flush_wbuf(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_addr addr);
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
size_t	ftl_tail_md_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_blocks(void);
size_t	ftl_vld_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_blocks(const struct spdk_ftl_dev *dev);
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg);
void	ftl_restore_nv_cache(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg);
int	ftl_band_set_direct_access(struct ftl_band *band, bool access);
bool	ftl_addr_is_written(struct ftl_band *band, struct ftl_addr addr);
int	ftl_flush_active_bands(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);
int	ftl_nv_cache_write_header(struct ftl_nv_cache *nv_cache, bool shutdown,
				  spdk_bdev_io_completion_cb cb_fn, void *cb_arg);
int	ftl_nv_cache_scrub(struct ftl_nv_cache *nv_cache, spdk_bdev_io_completion_cb cb_fn,
			   void *cb_arg);
void	ftl_get_media_events(struct spdk_ftl_dev *dev);
int	ftl_io_channel_poll(void *arg);
void	ftl_evict_cache_entry(struct spdk_ftl_dev *dev, struct ftl_wbuf_entry *entry);
struct spdk_io_channel *ftl_get_io_channel(const struct spdk_ftl_dev *dev);
struct ftl_io_channel *ftl_io_channel_get_ctx(struct spdk_io_channel *ioch);

/* Wrap a raw offset in a struct ftl_addr, full and packed forms respectively. */
#define ftl_to_addr(address) \
	(struct ftl_addr) { .offset = (uint64_t)(address) }

#define ftl_to_addr_packed(address) \
	(struct ftl_addr) { .pack.offset = (uint32_t)(address) }
/* Thread on which the core poller runs. */
static inline struct spdk_thread *
ftl_get_core_thread(const struct spdk_ftl_dev *dev)
{
	return dev->core_thread;
}

/* Number of operational bands on the device. */
static inline size_t
ftl_get_num_bands(const struct spdk_ftl_dev *dev)
{
	return dev->num_bands;
}

/* Number of parallel units, i.e. the base bdev's optimal open zone count. */
static inline size_t
ftl_get_num_punits(const struct spdk_ftl_dev *dev)
{
	return spdk_bdev_get_optimal_open_zones(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
}

/* Total zone count: bands x parallel units. */
static inline size_t
ftl_get_num_zones(const struct spdk_ftl_dev *dev)
{
	return ftl_get_num_bands(dev) * ftl_get_num_punits(dev);
}

/* Zone size (in blocks) of the base bdev. */
static inline size_t
ftl_get_num_blocks_in_zone(const struct spdk_ftl_dev *dev)
{
	return spdk_bdev_get_zone_size(spdk_bdev_desc_get_bdev(dev->base_bdev_desc));
}

/* Blocks per band: one zone from each parallel unit. */
static inline uint64_t
ftl_get_num_blocks_in_band(const struct spdk_ftl_dev *dev)
{
	return ftl_get_num_punits(dev) * ftl_get_num_blocks_in_zone(dev);
}
/* Starting block (SLBA) of the zone that contains the given address. */
static inline uint64_t
ftl_addr_get_zone_slba(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	uint64_t zone_blocks = ftl_get_num_blocks_in_zone(dev);

	/* Round the offset down to the start of its zone. */
	return addr.offset - (addr.offset % zone_blocks);
}
/* Band index containing the given address. */
static inline uint64_t
ftl_addr_get_band(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return addr.offset / ftl_get_num_blocks_in_band(dev);
}

/* Parallel unit index of the given address. */
static inline uint64_t
ftl_addr_get_punit(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return (addr.offset / ftl_get_num_blocks_in_zone(dev)) % ftl_get_num_punits(dev);
}

/* Block offset of the given address within its zone. */
static inline uint64_t
ftl_addr_get_zone_offset(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	return addr.offset % ftl_get_num_blocks_in_zone(dev);
}

/* Size in bytes of a band's valid-block bitmap (one bit per block). */
static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	return (size_t)spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), CHAR_BIT);
}

/* True when addresses fit in under 32 bits and the packed L2P format is used. */
static inline int
ftl_addr_packed(const struct spdk_ftl_dev *dev)
{
	return dev->addr_len < 32;
}

/* Flush one L2P entry to persistent memory; only functional in PMDK builds. */
static inline void
ftl_l2p_lba_persist(const struct spdk_ftl_dev *dev, uint64_t lba)
{
#ifdef SPDK_CONFIG_PMDK
	size_t ftl_addr_size = ftl_addr_packed(dev) ? 4 : 8;
	pmem_persist((char *)dev->l2p + (lba * ftl_addr_size), ftl_addr_size);
#else /* SPDK_CONFIG_PMDK */
	SPDK_ERRLOG("Libpmem not available, cannot flush l2p to pmem\n");
	assert(0);
#endif /* SPDK_CONFIG_PMDK */
}

static inline int
ftl_addr_invalid(struct ftl_addr addr)
{
	return addr.offset == ftl_to_addr(FTL_ADDR_INVALID).offset;
}

static inline int
ftl_addr_cached(struct ftl_addr addr)
{
	return !ftl_addr_invalid(addr) && addr.cached;
}

/* Convert a full 64-bit address to the packed 32-bit L2P representation. */
static inline struct ftl_addr
ftl_addr_to_packed(const struct spdk_ftl_dev *dev, struct ftl_addr addr)
{
	struct ftl_addr p = {};

	if (ftl_addr_invalid(addr)) {
		p = ftl_to_addr_packed(FTL_ADDR_INVALID);
	} else if (ftl_addr_cached(addr)) {
		p.pack.cached = 1;
		p.pack.cache_offset = (uint32_t) addr.cache_offset;
	} else {
		p.pack.offset = (uint32_t) addr.offset;
	}

	return p;
}

/* Expand a packed 32-bit address back to the full 64-bit representation. */
static inline struct ftl_addr
ftl_addr_from_packed(const struct spdk_ftl_dev *dev, struct ftl_addr p)
{
	struct ftl_addr addr = {};

	if (p.pack.offset == (uint32_t)FTL_ADDR_INVALID) {
		addr = ftl_to_addr(FTL_ADDR_INVALID);
	} else if (p.pack.cached) {
		addr.cached = 1;
		addr.cache_offset = p.pack.cache_offset;
	} else {
		addr = p;
	}

	return addr;
}

/* Atomic L2P element accessors; entry width (32/64) follows ftl_addr_packed(). */
#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

#define ftl_addr_cmp(p1, p2) \
	((p1).offset == (p2).offset)

/* Store lba -> addr in the L2P, persisting the entry when the L2P lives on pmem. */
static inline void
ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_addr addr)
{
	assert(dev->num_lbas > lba);

	if (ftl_addr_packed(dev)) {
		_ftl_l2p_set32(dev->l2p, lba, ftl_addr_to_packed(dev, addr).offset);
	} else {
		_ftl_l2p_set64(dev->l2p, lba, addr.offset);
	}

	if (dev->l2p_pmem_len != 0) {
		ftl_l2p_lba_persist(dev, lba);
	}
}

/* Look up the physical address of the given lba in the L2P. */
static inline struct ftl_addr
ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba)
{
	assert(dev->num_lbas > lba);

	if (ftl_addr_packed(dev)) {
		return ftl_addr_from_packed(dev, ftl_to_addr_packed(
						    _ftl_l2p_get32(dev->l2p, lba)));
	} else {
		return ftl_to_addr(_ftl_l2p_get64(dev->l2p, lba));
	}
}

static inline bool
ftl_dev_has_nv_cache(const struct spdk_ftl_dev *dev)
{
	return dev->nv_cache.bdev_desc != NULL;
}

#define FTL_NV_CACHE_HEADER_VERSION	(1)
#define FTL_NV_CACHE_DATA_OFFSET	(1)
/* The two top bits of an on-cache LBA encode the write phase. */
#define FTL_NV_CACHE_PHASE_OFFSET	(62)
#define FTL_NV_CACHE_PHASE_COUNT	(4)
#define FTL_NV_CACHE_PHASE_MASK		(3ULL << FTL_NV_CACHE_PHASE_OFFSET)
#define FTL_NV_CACHE_LBA_INVALID	(FTL_LBA_INVALID & ~FTL_NV_CACHE_PHASE_MASK)
/* Valid phases are 1..3; phase 0 marks a block that was never written. */
static inline bool
ftl_nv_cache_phase_is_valid(unsigned int phase)
{
	return phase >= 1 && phase < 4;
}

/* Phases cycle 1 -> 2 -> 3 -> 1. */
static inline unsigned int
ftl_nv_cache_next_phase(unsigned int current)
{
	assert(ftl_nv_cache_phase_is_valid(current));
	return current % 3 + 1;
}

/* Inverse of ftl_nv_cache_next_phase(): 1 -> 3 -> 2 -> 1. */
static inline unsigned int
ftl_nv_cache_prev_phase(unsigned int current)
{
	assert(ftl_nv_cache_phase_is_valid(current));
	return (current + 1) % 3 + 1;
}
/* Pack the write phase into the top bits of the lba (see FTL_NV_CACHE_PHASE_MASK). */
static inline uint64_t
ftl_nv_cache_pack_lba(uint64_t lba, unsigned int phase)
{
	assert(ftl_nv_cache_phase_is_valid(phase));
	return (lba & ~FTL_NV_CACHE_PHASE_MASK) | ((uint64_t)phase << FTL_NV_CACHE_PHASE_OFFSET);
}

/* Split a packed on-cache lba back into plain lba + phase. */
static inline void
ftl_nv_cache_unpack_lba(uint64_t in_lba, uint64_t *out_lba, unsigned int *phase)
{
	*out_lba = in_lba & ~FTL_NV_CACHE_PHASE_MASK;
	*phase = (in_lba & FTL_NV_CACHE_PHASE_MASK) >> FTL_NV_CACHE_PHASE_OFFSET;

	/* If the phase is invalid the block wasn't written yet, so treat the LBA as invalid too */
	if (!ftl_nv_cache_phase_is_valid(*phase) || *out_lba == FTL_NV_CACHE_LBA_INVALID) {
		*out_lba = FTL_LBA_INVALID;
	}
}

/* True when the configuration requests zone append instead of write. */
static inline bool
ftl_is_append_supported(const struct spdk_ftl_dev *dev)
{
	return dev->conf.use_append;
}
#endif /* FTL_CORE_H */

View File

@ -1,140 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/log.h"
#include "spdk/ftl.h"
#include "ftl_debug.h"
#include "ftl_band.h"

#if defined(DEBUG)
#if defined(FTL_META_DEBUG)

/* Human-readable names indexed by enum ftl_band_state. */
static const char *ftl_band_state_str[] = {
	"free",
	"prep",
	"opening",
	"open",
	"full",
	"closing",
	"closed",
	"max"
};

/* Cross-check a band's cached LBA map against the L2P: every valid, cached
 * block must map back to the same physical address. Returns false on the
 * first mismatch. Runs under the band's LBA map spinlock. */
bool
ftl_band_validate_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_addr addr_md, addr_l2p;
	size_t i, size, seg_off;
	bool valid = true;

	size = ftl_get_num_blocks_in_band(dev);

	pthread_spin_lock(&lba_map->lock);
	for (i = 0; i < size; ++i) {
		if (!spdk_bit_array_get(lba_map->vld, i)) {
			continue;
		}

		/* Only segments resident in memory can be checked. */
		seg_off = i / FTL_NUM_LBA_IN_BLOCK;
		if (lba_map->segments[seg_off] != FTL_LBA_MAP_SEG_CACHED) {
			continue;
		}

		addr_md = ftl_band_addr_from_block_offset(band, i);
		addr_l2p = ftl_l2p_get(dev, lba_map->map[i]);

		/* A cache-resident L2P entry cannot be compared to a band address. */
		if (addr_l2p.cached) {
			continue;
		}

		if (addr_l2p.offset != addr_md.offset) {
			valid = false;
			break;
		}
	}

	pthread_spin_unlock(&lba_map->lock);

	return valid;
}

/* Log validity statistics for every band that has ever been written. */
void
ftl_dev_dump_bands(struct spdk_ftl_dev *dev)
{
	size_t i;

	if (!dev->bands) {
		return;
	}

	ftl_debug("Bands validity:\n");
	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		/* Skip bands that are free and were never written. */
		if (dev->bands[i].state == FTL_BAND_STATE_FREE &&
		    dev->bands[i].wr_cnt == 0) {
			continue;
		}

		if (!dev->bands[i].num_zones) {
			ftl_debug(" Band %3zu: all zones are offline\n", i + 1);
			continue;
		}

		ftl_debug(" Band %3zu: %8zu / %zu \tnum_zones: %zu \twr_cnt: %"PRIu64"\tmerit:"
			  "%10.3f\tstate: %s\n",
			  i + 1, dev->bands[i].lba_map.num_vld,
			  ftl_band_user_blocks(&dev->bands[i]),
			  dev->bands[i].num_zones,
			  dev->bands[i].wr_cnt,
			  dev->bands[i].merit,
			  ftl_band_state_str[dev->bands[i].state]);
	}
}

#endif /* defined(FTL_META_DEBUG) */

#if defined(FTL_DUMP_STATS)

/* Log cumulative device statistics, including write amplification (WAF). */
void
ftl_dev_dump_stats(const struct spdk_ftl_dev *dev)
{
	size_t i, total = 0;
	char uuid[SPDK_UUID_STRING_LEN];
	double waf;
	const char *limits[] = {
		[SPDK_FTL_LIMIT_CRIT]  = "crit",
		[SPDK_FTL_LIMIT_HIGH]  = "high",
		[SPDK_FTL_LIMIT_LOW]   = "low",
		[SPDK_FTL_LIMIT_START] = "start"
	};

	if (!dev->bands) {
		return;
	}

	/* Count the number of valid LBAs */
	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		total += dev->bands[i].lba_map.num_vld;
	}

	/* WAF = total writes / user writes. */
	waf = (double)dev->stats.write_total / (double)dev->stats.write_user;

	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &dev->uuid);
	ftl_debug("\n");
	ftl_debug("device UUID: %s\n", uuid);
	ftl_debug("total valid LBAs: %zu\n", total);
	ftl_debug("total writes: %"PRIu64"\n", dev->stats.write_total);
	ftl_debug("user writes: %"PRIu64"\n", dev->stats.write_user);
	ftl_debug("WAF: %.4lf\n", waf);
	ftl_debug("limits:\n");
	for (i = 0; i < SPDK_FTL_LIMIT_MAX; ++i) {
		ftl_debug(" %5s: %"PRIu64"\n", limits[i], dev->stats.limits[i]);
	}
}

#endif /* defined(FTL_DUMP_STATS) */
#endif /* defined(DEBUG) */

View File

@ -1,45 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#ifndef FTL_DEBUG_H
#define FTL_DEBUG_H
#include "ftl_addr.h"
#include "ftl_band.h"
#include "ftl_core.h"
#if defined(DEBUG)
/* Debug flags - enabled when defined */
#define FTL_META_DEBUG 1
#define FTL_DUMP_STATS 1
#define ftl_debug(msg, ...) \
SPDK_ERRLOG(msg, ## __VA_ARGS__)
#else
#define ftl_debug(msg, ...)
#endif
/* Format an FTL address as "(offset)" into the caller-provided buffer and
 * return that buffer, so the call can be used inline in log statements. */
static inline const char *
ftl_addr2str(struct ftl_addr addr, char *buf, size_t size)
{
	snprintf(buf, size, "(%"PRIu64")", addr.offset);
	return buf;
}
#if defined(FTL_META_DEBUG)
bool ftl_band_validate_md(struct ftl_band *band);
void ftl_dev_dump_bands(struct spdk_ftl_dev *dev);
#else
#define ftl_band_validate_md(band)
#define ftl_dev_dump_bands(dev)
#endif
#if defined(FTL_DUMP_STATS)
void ftl_dev_dump_stats(const struct spdk_ftl_dev *dev);
#else
#define ftl_dev_dump_stats(dev)
#endif
#endif /* FTL_DEBUG_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,536 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk/ftl.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_band.h"
#include "ftl_debug.h"
/* Account for one more outstanding request belonging to this IO. Writes and
 * erases (except cache IO) additionally pin the band's LBA map. */
void
ftl_io_inc_req(struct ftl_io *io)
{
	bool needs_lba_map = !(io->flags & FTL_IO_CACHE) &&
			     io->type != FTL_IO_READ &&
			     io->type != FTL_IO_ERASE;

	if (needs_lba_map) {
		ftl_band_acquire_lba_map(io->band);
	}

	__atomic_fetch_add(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);
	io->req_cnt++;
}
/* Drop one outstanding request from this IO, releasing the band's LBA map
 * reference taken in ftl_io_inc_req() for the same request types. */
void
ftl_io_dec_req(struct ftl_io *io)
{
	unsigned long num_inflight __attribute__((unused));
	bool holds_lba_map = !(io->flags & FTL_IO_CACHE) &&
			     io->type != FTL_IO_READ &&
			     io->type != FTL_IO_ERASE;

	if (holds_lba_map) {
		ftl_band_release_lba_map(io->band);
	}

	num_inflight = __atomic_fetch_sub(&io->dev->num_inflight, 1, __ATOMIC_SEQ_CST);

	assert(num_inflight > 0);
	assert(io->req_cnt > 0);
	io->req_cnt--;
}
/* Return the base of the IO's iovec array. */
struct iovec *
ftl_io_iovec(struct ftl_io *io)
{
	return io->iov;
}
/* Return the LBA at a given block offset within the IO. Vector-LBA IOs keep
 * one LBA per block; otherwise LBAs are contiguous from lba.single. */
uint64_t
ftl_io_get_lba(const struct ftl_io *io, size_t offset)
{
	assert(offset < io->num_blocks);

	return (io->flags & FTL_IO_VECTOR_LBA) ? io->lba.vector[offset]
					       : io->lba.single + offset;
}
/* Return the LBA of the next unprocessed block (at the IO's current position). */
uint64_t
ftl_io_current_lba(const struct ftl_io *io)
{
	return ftl_io_get_lba(io, io->pos);
}
/*
 * Advance the IO's position by num_blocks and move the iovec cursor
 * (iov_pos/iov_off) forward by the same amount. Parents are advanced
 * recursively so their progress mirrors the child's.
 */
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	struct iovec *iov = ftl_io_iovec(io);
	size_t iov_blocks, block_left = num_blocks;

	io->pos += num_blocks;

	if (io->iov_cnt != 0) {
		while (block_left > 0) {
			assert(io->iov_pos < io->iov_cnt);
			iov_blocks = iov[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

			/* Still room within the current iovec - advance the offset only */
			if (io->iov_off + block_left < iov_blocks) {
				io->iov_off += block_left;
				break;
			}

			/* Consume the rest of this iovec and move to the next one */
			assert(iov_blocks > io->iov_off);
			block_left -= (iov_blocks - io->iov_off);
			io->iov_off = 0;
			io->iov_pos++;
		}
	}

	if (io->parent) {
		ftl_io_advance(io->parent, num_blocks);
	}
}
/* Sum the number of FTL_BLOCK_SIZE-sized blocks described by an iovec array. */
size_t
ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt)
{
	size_t idx, total = 0;

	for (idx = 0; idx < iov_cnt; idx++) {
		total += iov[idx].iov_len / FTL_BLOCK_SIZE;
	}

	return total;
}
/* Return the buffer address corresponding to the IO's current iovec cursor
 * (iov_pos entry, iov_off blocks into it). */
void *
ftl_io_iovec_addr(struct ftl_io *io)
{
	assert(io->iov_pos < io->iov_cnt);
	assert(io->iov_off * FTL_BLOCK_SIZE < ftl_io_iovec(io)[io->iov_pos].iov_len);

	return (char *)ftl_io_iovec(io)[io->iov_pos].iov_base +
	       io->iov_off * FTL_BLOCK_SIZE;
}
/* Number of blocks remaining in the iovec entry the cursor points at. */
size_t
ftl_io_iovec_len_left(struct ftl_io *io)
{
	size_t iov_blocks = ftl_io_iovec(io)[io->iov_pos].iov_len / FTL_BLOCK_SIZE;

	return iov_blocks - io->iov_off;
}
/*
 * Build the IO's iovec array from a source iovec array, starting iov_off
 * blocks into the first entry, covering exactly num_blocks blocks. Each
 * source entry's length is expressed in FTL_BLOCK_SIZE units.
 */
static void
ftl_io_init_iovec(struct ftl_io *io, const struct iovec *iov, size_t iov_cnt, size_t iov_off,
		  size_t num_blocks)
{
	size_t offset = 0, num_left;

	io->iov_pos = 0;
	io->iov_cnt = 0;
	io->num_blocks = num_blocks;

	while (offset < num_blocks) {
		assert(io->iov_cnt < FTL_IO_MAX_IOVEC && io->iov_cnt < iov_cnt);

		/* Take whatever is left of the current source entry, capped at
		 * the number of blocks still needed */
		num_left = spdk_min(iov[io->iov_cnt].iov_len / FTL_BLOCK_SIZE - iov_off,
				    num_blocks);
		io->iov[io->iov_cnt].iov_base = (char *)iov[io->iov_cnt].iov_base +
						iov_off * FTL_BLOCK_SIZE;
		io->iov[io->iov_cnt].iov_len = num_left * FTL_BLOCK_SIZE;

		offset += num_left;
		io->iov_cnt++;
		/* Only the first source entry is entered mid-way */
		iov_off = 0;
	}
}
/*
 * Trim the IO's iovec array so it covers exactly num_blocks blocks, truncating
 * the entry that crosses the new boundary and dropping any entries past it.
 * Only valid before any progress has been made on the IO.
 */
void
ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks)
{
	size_t iov_off = 0, block_off = 0;

	assert(io->num_blocks >= num_blocks);
	assert(io->pos == 0 && io->iov_pos == 0 && io->iov_off == 0);

	for (; iov_off < io->iov_cnt; ++iov_off) {
		size_t num_iov = io->iov[iov_off].iov_len / FTL_BLOCK_SIZE;
		size_t num_left = num_blocks - block_off;

		/* This entry reaches the new boundary - truncate it and stop */
		if (num_iov >= num_left) {
			io->iov[iov_off].iov_len = num_left * FTL_BLOCK_SIZE;
			io->iov_cnt = iov_off + 1;
			io->num_blocks = num_blocks;
			break;
		}

		block_off += num_iov;
	}
}
/* Common IO field initialization shared by internal and user IO setup. */
static void
ftl_io_init(struct ftl_io *io, struct spdk_ftl_dev *dev,
	    ftl_io_fn fn, void *ctx, int flags, int type)
{
	io->dev = dev;
	io->type = type;
	io->flags |= flags | FTL_IO_INITIALIZED;
	io->cb_fn = fn;
	io->cb_ctx = ctx;
	io->lba.single = FTL_LBA_INVALID;
	io->addr.offset = FTL_ADDR_INVALID;
	io->trace = ftl_trace_alloc_id(dev);
}
/*
 * Initialize an internal IO from the given options. Allocates the IO (as a
 * child of opts->parent when set) unless one is supplied in opts->io. Child
 * IOs inherit their LBA range and iovecs from the parent's current position.
 * Returns NULL on allocation failure.
 */
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;
	struct ftl_io *parent = opts->parent;
	struct spdk_ftl_dev *dev = opts->dev;
	const struct iovec *iov;
	size_t iov_cnt, iov_off;

	if (!io) {
		if (parent) {
			io = ftl_io_alloc_child(parent);
		} else {
			io = ftl_io_alloc(ftl_get_io_channel(dev));
		}

		if (!io) {
			return NULL;
		}
	}

	ftl_io_clear(io);
	ftl_io_init(io, dev, opts->cb_fn, opts->cb_ctx, opts->flags | FTL_IO_INTERNAL, opts->type);

	io->batch = opts->batch;
	io->band = opts->band;
	io->md = opts->md;
	io->iov = &io->iov_buf[0];

	if (parent) {
		/* Child starts where the parent currently is - both LBA and iovecs */
		if (parent->flags & FTL_IO_VECTOR_LBA) {
			io->lba.vector = parent->lba.vector + parent->pos;
		} else {
			io->lba.single = parent->lba.single + parent->pos;
		}

		iov = &parent->iov[parent->iov_pos];
		iov_cnt = parent->iov_cnt - parent->iov_pos;
		iov_off = parent->iov_off;
	} else {
		iov = &opts->iovs[0];
		iov_cnt = opts->iovcnt;
		iov_off = 0;
	}

	/* Some requests (zone resets) do not use iovecs */
	if (iov_cnt > 0) {
		ftl_io_init_iovec(io, iov, iov_cnt, iov_off, opts->num_blocks);
	}

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		/* Per-block LBA table; freed in _ftl_io_free() */
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		if (!io->lba.vector) {
			ftl_io_free(io);
			return NULL;
		}
	}

	return io;
}
/*
 * Build a write IO flushing one full transfer batch from the write buffer to
 * the given band at the given address. Returns NULL on allocation failure.
 */
struct ftl_io *
ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr, struct ftl_band *band,
		 struct ftl_batch *batch, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= dev,
		.io		= NULL,
		.batch		= batch,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= 0,
		.type		= FTL_IO_WRITE,
		.num_blocks	= dev->xfer_size,
		.cb_fn		= cb,
		.iovcnt		= dev->xfer_size,
		.md		= batch->metadata,
	};

	/* One iovec per block - copy them straight from the batch */
	memcpy(opts.iovs, batch->iov, sizeof(struct iovec) * dev->xfer_size);

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->addr = addr;

	return io;
}
/*
 * Build an erase IO for num_blocks zones of the given band. The IO carries no
 * data (iovcnt == 0) and operates in physical addressing mode. Returns NULL on
 * allocation failure.
 */
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;
	struct ftl_io_init_opts opts = {
		.dev		= band->dev,
		.io		= NULL,
		.band		= band,
		.size		= sizeof(struct ftl_io),
		.flags		= FTL_IO_PHYSICAL_MODE,
		.type		= FTL_IO_ERASE,
		.num_blocks	= 1,
		.cb_fn		= cb,
		.iovcnt		= 0,
		.md		= NULL,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	/* Initialized with 1 above so the iovec setup is skipped; the real
	 * block count is filled in afterwards */
	io->num_blocks = num_blocks;

	return io;
}
/* Adapter translating the internal IO completion callback into the
 * user-supplied spdk_ftl_fn callback. */
static void
_ftl_user_cb(struct ftl_io *io, void *arg, int status)
{
	io->user_fn(arg, status);
}
/*
 * Allocate and initialize an IO for a user read/write request. The caller's
 * iovecs are referenced directly (not copied). Returns NULL when the channel's
 * IO pool is exhausted.
 */
struct ftl_io *
ftl_io_user_init(struct spdk_io_channel *_ioch, uint64_t lba, size_t num_blocks, struct iovec *iov,
		 size_t iov_cnt, spdk_ftl_fn cb_fn, void *cb_ctx, int type)
{
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(_ioch);
	struct spdk_ftl_dev *dev = ioch->dev;
	struct ftl_io *io;

	io = ftl_io_alloc(_ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, dev, _ftl_user_cb, cb_ctx, 0, type);

	io->lba.single = lba;
	io->user_fn = cb_fn;
	io->iov = iov;
	io->iov_cnt = iov_cnt;
	io->num_blocks = num_blocks;

	ftl_trace_lba_io_init(io->dev, io);
	return io;
}
/* Release an IO's resources (LBA vector, spinlock) and return it to its
 * channel's mempool. The IO must have no remaining children. */
static void
_ftl_io_free(struct ftl_io *io)
{
	struct ftl_io_channel *ioch;

	assert(LIST_EMPTY(&io->children));

	if (io->flags & FTL_IO_VECTOR_LBA) {
		free(io->lba.vector);
	}

	if (pthread_spin_destroy(&io->lock)) {
		SPDK_ERRLOG("pthread_spin_destroy failed\n");
	}

	ioch = ftl_io_channel_get_ctx(io->ioch);
	spdk_mempool_put(ioch->io_pool, io);
}
/*
 * Unlink a child IO from its parent under the parent's lock, propagating the
 * child's status to the parent (first error wins). Returns true when the
 * parent is done and this was its last child, i.e. the parent can complete.
 */
static bool
ftl_io_remove_child(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool parent_done;

	pthread_spin_lock(&parent->lock);
	LIST_REMOVE(io, child_entry);

	parent_done = parent->done && LIST_EMPTY(&parent->children);
	/* Keep the parent's existing status if set, otherwise take the child's */
	parent->status = parent->status ? : io->status;

	pthread_spin_unlock(&parent->lock);

	return parent_done;
}
/*
 * Complete an IO: mark it done and, if it has no outstanding children, invoke
 * its callback, detach it from its parent (completing the parent too when this
 * was its last child) and free it. With children still pending, completion is
 * deferred until the last child detaches.
 */
void
ftl_io_complete(struct ftl_io *io)
{
	struct ftl_io *parent = io->parent;
	bool complete;

	io->flags &= ~FTL_IO_INITIALIZED;

	pthread_spin_lock(&io->lock);
	complete = LIST_EMPTY(&io->children);
	io->done = true;
	pthread_spin_unlock(&io->lock);

	if (complete) {
		if (io->cb_fn) {
			io->cb_fn(io, io->cb_ctx, io->status);
		}

		if (parent && ftl_io_remove_child(io)) {
			ftl_io_complete(parent);
		}

		_ftl_io_free(io);
	}
}
/*
 * Allocate a child IO linked to the given parent, inheriting its flags and
 * type. The parent cannot complete until all children have detached. Returns
 * NULL when the channel's IO pool is exhausted.
 */
struct ftl_io *
ftl_io_alloc_child(struct ftl_io *parent)
{
	struct ftl_io *io;

	io = ftl_io_alloc(parent->ioch);
	if (spdk_unlikely(!io)) {
		return NULL;
	}

	ftl_io_init(io, parent->dev, NULL, NULL, parent->flags, parent->type);
	io->parent = parent;

	pthread_spin_lock(&parent->lock);
	LIST_INSERT_HEAD(&parent->children, io, child_entry);
	pthread_spin_unlock(&parent->lock);

	return io;
}
/* Fail the IO with the given status and fast-forward its position to the end
 * so it is treated as fully processed. */
void
ftl_io_fail(struct ftl_io *io, int status)
{
	io->status = status;
	ftl_io_advance(io, io->num_blocks - io->pos);
}
/* Return the metadata pointer for the IO's current position, or NULL when the
 * IO carries no metadata. */
void *
ftl_io_get_md(const struct ftl_io *io)
{
	return io->md ? (char *)io->md + io->pos * io->dev->md_size : NULL;
}
/*
 * Allocate a zeroed IO from the channel's mempool and initialize its children
 * spinlock. Returns NULL when the pool is exhausted or lock init fails.
 */
struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	struct ftl_io *io;
	struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(ch);

	io = spdk_mempool_get(ioch->io_pool);
	if (!io) {
		return NULL;
	}

	/* elem_size covers any per-channel extension beyond sizeof(*io) */
	memset(io, 0, ioch->elem_size);
	io->ioch = ch;

	if (pthread_spin_init(&io->lock, PTHREAD_PROCESS_PRIVATE)) {
		SPDK_ERRLOG("pthread_spin_init failed\n");
		spdk_mempool_put(ioch->io_pool, io);
		return NULL;
	}

	return io;
}
/* Clear the IO and reinitialize it with a new callback, flags and type,
 * allowing a single allocation to be reused for another request. */
void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb, void *ctx, int flags, int type)
{
	ftl_io_clear(io);
	ftl_io_init(io, io->dev, cb, ctx, flags, type);
}
/* Reset the IO's progress counters and drop its flags, band and batch
 * associations. */
void
ftl_io_clear(struct ftl_io *io)
{
	ftl_io_reset(io);

	io->flags = 0;
	io->band = NULL;
	io->batch = NULL;
}
/* Rewind the IO's progress: clear request count, position and iovec cursor. */
void
ftl_io_reset(struct ftl_io *io)
{
	io->req_cnt = 0;
	io->pos = 0;
	io->iov_pos = 0;
	io->iov_off = 0;
	io->done = false;
}
/*
 * Free an IO without completing it (no callback is invoked). If it was the
 * last child of a finished parent, the parent is completed. NULL is a no-op.
 */
void
ftl_io_free(struct ftl_io *io)
{
	struct ftl_io *parent;

	if (!io) {
		return;
	}

	parent = io->parent;
	if (parent && ftl_io_remove_child(io)) {
		ftl_io_complete(parent);
	}

	_ftl_io_free(io);
}
/*
 * Invoke the callback on the IO itself (when it has no children) or on each of
 * its children, failing the parent on the first callback error, then complete
 * the IO. May only be called once per IO.
 */
void
ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *))
{
	struct ftl_io *child, *tmp;

	assert(!io->done);

	/*
	 * If the IO doesn't have any children, it means that it directly describes a request (i.e.
	 * all of the buffers, LBAs, etc. are filled). Otherwise the IO only groups together several
	 * requests and may be partially filled, so the callback needs to be called on all of its
	 * children instead.
	 */
	if (LIST_EMPTY(&io->children)) {
		callback(io);
		return;
	}

	LIST_FOREACH_SAFE(child, &io->children, child_entry, tmp) {
		int rc = callback(child);
		if (rc) {
			assert(rc != -EAGAIN);
			ftl_io_fail(io, rc);
			break;
		}
	}

	/*
	 * If all the callbacks were processed or an error occurred, treat this IO as completed.
	 * Multiple calls to ftl_io_call_foreach_child are not supported, resubmissions are supposed
	 * to be handled in the callback.
	 */
	ftl_io_complete(io);
}

View File

@ -1,323 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#ifndef FTL_IO_H
#define FTL_IO_H
#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/ftl.h"
#include "ftl_addr.h"
#include "ftl_trace.h"
struct spdk_ftl_dev;
struct ftl_band;
struct ftl_batch;
struct ftl_io;
typedef int (*ftl_md_pack_fn)(struct ftl_band *);
typedef void (*ftl_io_fn)(struct ftl_io *, void *, int);
/* IO flags */
enum ftl_io_flags {
/* Indicates whether IO is already initialized */
FTL_IO_INITIALIZED = (1 << 0),
/* Internal based IO (defrag, metadata etc.) */
FTL_IO_INTERNAL = (1 << 1),
/* Indicates that the IO should not go through if there's */
/* already another one scheduled to the same LBA */
FTL_IO_WEAK = (1 << 2),
/* Indicates that the IO is used for padding */
FTL_IO_PAD = (1 << 3),
/* The IO operates on metadata */
FTL_IO_MD = (1 << 4),
/* Using physical instead of logical address */
FTL_IO_PHYSICAL_MODE = (1 << 5),
/* Indicates that IO contains noncontiguous LBAs */
FTL_IO_VECTOR_LBA = (1 << 6),
/* The IO is directed to non-volatile cache */
FTL_IO_CACHE = (1 << 7),
/* Indicates that physical address should be taken from IO struct, */
/* not assigned by wptr, only works if wptr is also in direct mode */
FTL_IO_DIRECT_ACCESS = (1 << 8),
/* Bypass the non-volatile cache */
FTL_IO_BYPASS_CACHE = (1 << 9),
};
/* Kind of operation an ftl_io describes */
enum ftl_io_type {
	/* Read request */
	FTL_IO_READ,
	/* Write request */
	FTL_IO_WRITE,
	/* Zone erase request */
	FTL_IO_ERASE,
};
#define FTL_IO_MAX_IOVEC 64
struct ftl_io_init_opts {
struct spdk_ftl_dev *dev;
/* IO descriptor */
struct ftl_io *io;
/* Parent request */
struct ftl_io *parent;
/* Size of IO descriptor */
size_t size;
/* IO flags */
int flags;
/* IO type */
enum ftl_io_type type;
/* Transfer batch, set for IO going through the write buffer */
struct ftl_batch *batch;
/* Band to which the IO is directed */
struct ftl_band *band;
/* Number of logical blocks */
size_t num_blocks;
/* Data */
struct iovec iovs[FTL_IO_MAX_IOVEC];
int iovcnt;
/* Metadata */
void *md;
/* Callback's function */
ftl_io_fn cb_fn;
/* Callback's context */
void *cb_ctx;
};
struct ftl_io_channel;
struct ftl_wbuf_entry {
/* IO channel that owns the write buffer entry */
struct ftl_io_channel *ioch;
/* Data payload (single block) */
void *payload;
/* Index within the IO channel's wbuf_entries array */
uint32_t index;
uint32_t io_flags;
/* Points at the band the data is copied from. Only valid for internal
* requests coming from reloc.
*/
struct ftl_band *band;
/* Physical address of that particular block. Valid once the data has
* been written out.
*/
struct ftl_addr addr;
/* Logical block address */
uint64_t lba;
/* Trace ID of the requests the entry is part of */
uint64_t trace;
/* Indicates that the entry was written out and is still present in the
* L2P table.
*/
bool valid;
/* Lock that protects the entry from being evicted from the L2P */
pthread_spinlock_t lock;
TAILQ_ENTRY(ftl_wbuf_entry) tailq;
};
#define FTL_IO_CHANNEL_INDEX_INVALID ((uint64_t)-1)
struct ftl_io_channel {
/* Device */
struct spdk_ftl_dev *dev;
/* IO pool element size */
size_t elem_size;
/* Index within the IO channel array */
uint64_t index;
/* IO pool */
struct spdk_mempool *io_pool;
/* Underlying device IO channel */
struct spdk_io_channel *base_ioch;
/* Persistent cache IO channel */
struct spdk_io_channel *cache_ioch;
/* Poller used for completing write requests and retrying IO */
struct spdk_poller *poller;
/* Write completion queue */
TAILQ_HEAD(, ftl_io) write_cmpl_queue;
TAILQ_HEAD(, ftl_io) retry_queue;
TAILQ_ENTRY(ftl_io_channel) tailq;
/* Array of write buffer entries */
struct ftl_wbuf_entry *wbuf_entries;
/* Write buffer data payload */
void *wbuf_payload;
/* Number of write buffer entries */
uint32_t num_entries;
/* Write buffer queues */
struct spdk_ring *free_queue;
struct spdk_ring *submit_queue;
/* Maximum number of concurrent user writes */
uint32_t qdepth_limit;
/* Current number of concurrent user writes */
uint32_t qdepth_current;
/* Means that the IO channel is being flushed */
bool flush;
};
/* General IO descriptor */
struct ftl_io {
/* Device */
struct spdk_ftl_dev *dev;
/* IO channel */
struct spdk_io_channel *ioch;
union {
/* LBA table */
uint64_t *vector;
/* First LBA */
uint64_t single;
} lba;
/* First block address */
struct ftl_addr addr;
/* Number of processed blocks */
size_t pos;
/* Number of blocks */
size_t num_blocks;
/* IO vector pointer */
struct iovec *iov;
/* IO vector buffer for internal requests */
struct iovec iov_buf[FTL_IO_MAX_IOVEC];
/* Metadata */
void *md;
/* Number of IO vectors */
size_t iov_cnt;
/* Position within the iovec */
size_t iov_pos;
/* Offset within the iovec (in blocks) */
size_t iov_off;
/* Transfer batch (valid only for writes going through the write buffer) */
struct ftl_batch *batch;
/* Band this IO is being written to */
struct ftl_band *band;
/* Request status */
int status;
/* Number of split requests */
size_t req_cnt;
/* Callback's function */
ftl_io_fn cb_fn;
/* Callback's context */
void *cb_ctx;
/* User callback function */
spdk_ftl_fn user_fn;
/* Flags */
int flags;
/* IO type */
enum ftl_io_type type;
/* Done flag */
bool done;
/* Parent request */
struct ftl_io *parent;
/* Child requests list */
LIST_HEAD(, ftl_io) children;
/* Child list link */
LIST_ENTRY(ftl_io) child_entry;
/* Children lock */
pthread_spinlock_t lock;
/* Trace group id */
uint64_t trace;
/* Used by retry and write completion queues */
TAILQ_ENTRY(ftl_io) ioch_entry;
};
/* Metadata IO */
struct ftl_md_io {
/* Parent IO structure */
struct ftl_io io;
/* Serialization/deserialization callback */
ftl_md_pack_fn pack_fn;
/* Callback's function */
ftl_io_fn cb_fn;
/* Callback's context */
void *cb_ctx;
};
/* True when the IO addresses blocks by physical address. */
static inline bool
ftl_io_mode_physical(const struct ftl_io *io)
{
	return (io->flags & FTL_IO_PHYSICAL_MODE) != 0;
}
/* True when the IO addresses blocks by logical address (the default mode). */
static inline bool
ftl_io_mode_logical(const struct ftl_io *io)
{
	return !ftl_io_mode_physical(io);
}
/* True when the IO has processed every block and has no outstanding requests. */
static inline bool
ftl_io_done(const struct ftl_io *io)
{
	bool fully_advanced = (io->pos == io->num_blocks);

	return io->req_cnt == 0 && fully_advanced;
}
struct ftl_io *ftl_io_alloc(struct spdk_io_channel *ch);
struct ftl_io *ftl_io_alloc_child(struct ftl_io *parent);
void ftl_io_fail(struct ftl_io *io, int status);
void ftl_io_free(struct ftl_io *io);
struct ftl_io *ftl_io_init_internal(const struct ftl_io_init_opts *opts);
void ftl_io_reinit(struct ftl_io *io, ftl_io_fn cb,
void *ctx, int flags, int type);
void ftl_io_clear(struct ftl_io *io);
void ftl_io_inc_req(struct ftl_io *io);
void ftl_io_dec_req(struct ftl_io *io);
struct iovec *ftl_io_iovec(struct ftl_io *io);
uint64_t ftl_io_current_lba(const struct ftl_io *io);
uint64_t ftl_io_get_lba(const struct ftl_io *io, size_t offset);
void ftl_io_advance(struct ftl_io *io, size_t num_blocks);
size_t ftl_iovec_num_blocks(struct iovec *iov, size_t iov_cnt);
void *ftl_io_iovec_addr(struct ftl_io *io);
size_t ftl_io_iovec_len_left(struct ftl_io *io);
struct ftl_io *ftl_io_wbuf_init(struct spdk_ftl_dev *dev, struct ftl_addr addr,
struct ftl_band *band, struct ftl_batch *batch, ftl_io_fn cb);
struct ftl_io *ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb);
struct ftl_io *ftl_io_user_init(struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
void *cb_arg, int type);
void *ftl_io_get_md(const struct ftl_io *io);
void ftl_io_complete(struct ftl_io *io);
void ftl_io_shrink_iovec(struct ftl_io *io, size_t num_blocks);
void ftl_io_process_error(struct ftl_io *io, const struct spdk_nvme_cpl *status);
void ftl_io_reset(struct ftl_io *io);
void ftl_io_call_foreach_child(struct ftl_io *io, int (*callback)(struct ftl_io *));
#endif /* FTL_IO_H */

View File

@ -1,834 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/ftl.h"
#include "ftl_reloc.h"
#include "ftl_core.h"
#include "ftl_io.h"
#include "ftl_band.h"
#include "ftl_debug.h"
/* Maximum active reloc moves */
#define FTL_RELOC_MAX_MOVES 256
struct ftl_reloc;
struct ftl_band_reloc;
enum ftl_reloc_move_state {
FTL_RELOC_STATE_READ_LBA_MAP,
FTL_RELOC_STATE_READ,
FTL_RELOC_STATE_WRITE,
};
enum ftl_band_reloc_state {
FTL_BAND_RELOC_STATE_INACTIVE,
FTL_BAND_RELOC_STATE_PENDING,
FTL_BAND_RELOC_STATE_ACTIVE,
FTL_BAND_RELOC_STATE_HIGH_PRIO
};
struct ftl_reloc_move {
struct ftl_band_reloc *breloc;
/* Start addr */
struct ftl_addr addr;
/* Number of logical blocks */
size_t num_blocks;
/* Data buffer */
void *data;
/* Move state (read lba_map, read, write) */
enum ftl_reloc_move_state state;
/* IO associated with move */
struct ftl_io *io;
STAILQ_ENTRY(ftl_reloc_move) entry;
};
struct ftl_band_reloc {
struct ftl_reloc *parent;
/* Band being relocated */
struct ftl_band *band;
/* Number of logical blocks to be relocated */
size_t num_blocks;
/* Bitmap of logical blocks to be relocated */
struct spdk_bit_array *reloc_map;
/* State of the band reloc */
enum ftl_band_reloc_state state;
/* The band is being defragged */
bool defrag;
/* Reloc map iterator */
struct {
/* Array of zone offsets */
size_t *zone_offset;
/* Current zone */
size_t zone_current;
} iter;
/* Number of outstanding moves */
size_t num_outstanding;
/* Pool of move objects */
struct ftl_reloc_move *moves;
/* Move queue */
STAILQ_HEAD(, ftl_reloc_move) move_queue;
TAILQ_ENTRY(ftl_band_reloc) entry;
};
struct ftl_reloc {
/* Device associated with relocate */
struct spdk_ftl_dev *dev;
/* Indicates relocate is about to halt */
bool halt;
/* Maximum number of IOs per band */
size_t max_qdepth;
/* Maximum number of active band relocates */
size_t max_active;
/* Maximum transfer size (in logical blocks) per single IO */
size_t xfer_size;
/* Number of bands being defragged */
size_t num_defrag_bands;
/* Array of band relocates */
struct ftl_band_reloc *brelocs;
/* Number of active/priority band relocates */
size_t num_active;
/* Priority band relocates queue */
TAILQ_HEAD(, ftl_band_reloc) prio_queue;
/* Active band relocates queue */
TAILQ_HEAD(, ftl_band_reloc) active_queue;
/* Pending band relocates queue */
TAILQ_HEAD(, ftl_band_reloc) pending_queue;
};
/* True when at least one band is currently being defragmented. */
bool
ftl_reloc_is_defrag_active(const struct ftl_reloc *reloc)
{
	return reloc->num_defrag_bands != 0;
}
/* Offset of the iterator within the zone it currently points at. */
static size_t
ftl_reloc_iter_zone_offset(struct ftl_band_reloc *breloc)
{
	return breloc->iter.zone_offset[breloc->iter.zone_current];
}
/* Nonzero when the iterator has consumed every block of the current zone. */
static size_t
ftl_reloc_iter_zone_done(struct ftl_band_reloc *breloc)
{
	size_t zone_size = ftl_get_num_blocks_in_zone(breloc->parent->dev);

	return ftl_reloc_iter_zone_offset(breloc) == zone_size;
}
/* Remove a block from the relocation map, updating the pending-block count.
 * Blocks not present in the map are ignored. */
static void
ftl_reloc_clr_block(struct ftl_band_reloc *breloc, size_t block_off)
{
	if (spdk_bit_array_get(breloc->reloc_map, block_off)) {
		spdk_bit_array_clear(breloc->reloc_map, block_off);
		assert(breloc->num_blocks > 0);
		breloc->num_blocks--;
	}
}
/* Completion of the LBA map read for a move: advance the move to the write
 * state and requeue it for processing. */
static void
ftl_reloc_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_band_reloc *breloc = move->breloc;

	breloc->num_outstanding--;
	assert(status == 0);
	move->state = FTL_RELOC_STATE_WRITE;
	STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
}
/* Kick off reading the portion of the band's LBA map covering the move's
 * block range; ftl_reloc_read_lba_map_cb() continues the move on completion. */
static int
ftl_reloc_read_lba_map(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	struct ftl_band *band = breloc->band;

	breloc->num_outstanding++;
	return ftl_band_read_lba_map(band, ftl_band_block_offset_from_addr(band, move->addr),
				     move->num_blocks, ftl_reloc_read_lba_map_cb, move);
}
/*
 * Prepare a band for relocation: pin its LBA map (allocating it first unless
 * the band is high-priority, in which case it is already allocated) and seed
 * the move queue with the full set of move objects in the READ state.
 */
static void
ftl_reloc_prep(struct ftl_band_reloc *breloc)
{
	struct ftl_band *band = breloc->band;
	struct ftl_reloc *reloc = breloc->parent;
	struct ftl_reloc_move *move;
	size_t i;

	reloc->num_active++;

	if (!band->high_prio) {
		if (ftl_band_alloc_lba_map(band)) {
			SPDK_ERRLOG("Failed to allocate lba map\n");
			assert(false);
		}
	} else {
		ftl_band_acquire_lba_map(band);
	}

	for (i = 0; i < reloc->max_qdepth; ++i) {
		move = &breloc->moves[i];
		move->state = FTL_RELOC_STATE_READ;
		STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
	}
}
/* Release a move's data buffer and reset it to a pristine READ-state object
 * ready for reuse. */
static void
ftl_reloc_free_move(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	assert(move);
	spdk_dma_free(move->data);
	memset(move, 0, sizeof(*move));
	move->state = FTL_RELOC_STATE_READ;
}
/*
 * Completion of a relocation write: clear every relocated block from the
 * reloc map and recycle the move object back onto the queue.
 */
static void
ftl_reloc_write_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_addr addr = move->addr;
	struct ftl_band_reloc *breloc = move->breloc;
	size_t i;

	breloc->num_outstanding--;

	/* TODO: failed writes are currently fatal for the relocation logic */
	if (status) {
		SPDK_ERRLOG("Reloc write failed with status: %d\n", status);
		assert(false);
		return;
	}

	for (i = 0; i < move->num_blocks; ++i) {
		addr.offset = move->addr.offset + i;
		size_t block_off = ftl_band_block_offset_from_addr(breloc->band, addr);
		ftl_reloc_clr_block(breloc, block_off);
	}

	ftl_reloc_free_move(breloc, move);
	STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
}
/* Completion of a relocation read: advance the move to the LBA-map-read state
 * and requeue it. Read failures are currently fatal (see TODO below). */
static void
ftl_reloc_read_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_reloc_move *move = arg;
	struct ftl_band_reloc *breloc = move->breloc;

	breloc->num_outstanding--;

	/* TODO: We should handle fail on relocation read. We need to inform */
	/* user that this group of blocks is bad (update l2p with bad block address and */
	/* put it to lba_map/sector_lba). Maybe we could also retry read with smaller granularity? */
	if (status) {
		SPDK_ERRLOG("Reloc read failed with status: %d\n", status);
		assert(false);
		return;
	}

	move->state = FTL_RELOC_STATE_READ_LBA_MAP;
	move->io = NULL;
	STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
}
/* Rewind the band-relocation iterator to the start of the first zone. */
static void
ftl_reloc_iter_reset(struct ftl_band_reloc *breloc)
{
	size_t num_zones = ftl_get_num_punits(breloc->band->dev);

	breloc->iter.zone_current = 0;
	memset(breloc->iter.zone_offset, 0,
	       num_zones * sizeof(*breloc->iter.zone_offset));
}
/* Band-relative block offset the iterator currently points at. */
static size_t
ftl_reloc_iter_block_offset(struct ftl_band_reloc *breloc)
{
	size_t zone = breloc->iter.zone_current;
	size_t zone_base = zone * ftl_get_num_blocks_in_zone(breloc->parent->dev);

	return zone_base + breloc->iter.zone_offset[zone];
}
/* Move the iterator to the next zone, wrapping around at the last one. */
static void
ftl_reloc_iter_next_zone(struct ftl_band_reloc *breloc)
{
	size_t num_zones = ftl_get_num_punits(breloc->band->dev);
	size_t next = breloc->iter.zone_current + 1;

	breloc->iter.zone_current = next % num_zones;
}
/* A block needs relocation when it has been written, is marked in the reloc
 * map, and still holds valid data according to the band's valid map. */
static int
ftl_reloc_block_valid(struct ftl_band_reloc *breloc, size_t block_off)
{
	struct ftl_addr addr = ftl_band_addr_from_block_offset(breloc->band, block_off);

	return ftl_addr_is_written(breloc->band, addr) &&
	       spdk_bit_array_get(breloc->reloc_map, block_off) &&
	       ftl_band_block_offset_valid(breloc->band, block_off);
}
/*
 * Advance the iterator by one block within the current zone, storing the
 * visited block offset in *block_off. Returns 1 when that block needs
 * relocation, 0 when the zone is exhausted or the block was skipped (skipped
 * blocks are also dropped from the reloc map).
 */
static int
ftl_reloc_iter_next(struct ftl_band_reloc *breloc, size_t *block_off)
{
	size_t zone = breloc->iter.zone_current;

	*block_off = ftl_reloc_iter_block_offset(breloc);

	if (ftl_reloc_iter_zone_done(breloc)) {
		return 0;
	}

	breloc->iter.zone_offset[zone]++;

	if (!ftl_reloc_block_valid(breloc, *block_off)) {
		ftl_reloc_clr_block(breloc, *block_off);
		return 0;
	}

	return 1;
}
/* Scan the remainder of the current zone for the first block needing
 * relocation. Returns 1 with *block_off set on success, 0 otherwise. */
static int
ftl_reloc_first_valid_block(struct ftl_band_reloc *breloc, size_t *block_off)
{
	size_t i, num_blocks = ftl_get_num_blocks_in_zone(breloc->parent->dev);

	for (i = ftl_reloc_iter_zone_offset(breloc); i < num_blocks; ++i) {
		if (ftl_reloc_iter_next(breloc, block_off)) {
			return 1;
		}
	}

	return 0;
}
/* Nonzero once the iterator has walked every block of every zone in the band. */
static int
ftl_reloc_iter_done(struct ftl_band_reloc *breloc)
{
	size_t zone;
	size_t num_zones = ftl_get_num_punits(breloc->band->dev);
	size_t zone_size = ftl_get_num_blocks_in_zone(breloc->parent->dev);

	for (zone = 0; zone < num_zones; ++zone) {
		if (breloc->iter.zone_offset[zone] != zone_size) {
			return 0;
		}
	}

	return 1;
}
/*
 * Collect up to _num_blocks consecutive blocks needing relocation starting at
 * the iterator's position. *addr receives the address of the first such block;
 * the return value is the run length (0 when none remain in this zone).
 */
static size_t
ftl_reloc_find_valid_blocks(struct ftl_band_reloc *breloc,
			    size_t _num_blocks, struct ftl_addr *addr)
{
	size_t block_off, num_blocks = 0;

	if (!ftl_reloc_first_valid_block(breloc, &block_off)) {
		return 0;
	}

	*addr = ftl_band_addr_from_block_offset(breloc->band, block_off);

	for (num_blocks = 1; num_blocks < _num_blocks; num_blocks++) {
		/* Stop at the first gap or zone boundary */
		if (!ftl_reloc_iter_next(breloc, &block_off)) {
			break;
		}
	}

	return num_blocks;
}
/*
 * Find the next batch of blocks to relocate, rotating through the band's
 * zones. Returns the number of blocks found (up to xfer_size) with *addr set
 * to the first one, or 0 when the whole band has been iterated.
 */
static size_t
ftl_reloc_next_blocks(struct ftl_band_reloc *breloc, struct ftl_addr *addr)
{
	size_t i, num_blocks = 0;
	struct spdk_ftl_dev *dev = breloc->parent->dev;

	for (i = 0; i < ftl_get_num_punits(dev); ++i) {
		num_blocks = ftl_reloc_find_valid_blocks(breloc, breloc->parent->xfer_size, addr);
		ftl_reloc_iter_next_zone(breloc);

		if (num_blocks || ftl_reloc_iter_done(breloc)) {
			break;
		}
	}

	return num_blocks;
}
/*
 * Build the internal IO backing a move, in physical addressing mode, over the
 * move's data buffer. For vector-LBA writes the per-block LBA table is filled
 * from the band's LBA map (invalid blocks get FTL_LBA_INVALID). Returns NULL
 * on allocation failure.
 */
static struct ftl_io *
ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
		  ftl_io_fn fn, enum ftl_io_type io_type, int flags)
{
	size_t block_off, i;
	struct ftl_addr addr = move->addr;
	struct ftl_io *io = NULL;
	struct ftl_io_init_opts opts = {
		.dev		= breloc->parent->dev,
		.band		= breloc->band,
		.size		= sizeof(*io),
		.flags		= flags | FTL_IO_INTERNAL | FTL_IO_PHYSICAL_MODE,
		.type		= io_type,
		.num_blocks	= move->num_blocks,
		.iovs		= {
			{
				.iov_base = move->data,
				.iov_len = move->num_blocks * FTL_BLOCK_SIZE,
			}
		},
		.iovcnt		= 1,
		.cb_fn		= fn,
	};

	io = ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->cb_ctx = move;
	io->addr = move->addr;

	if (flags & FTL_IO_VECTOR_LBA) {
		for (i = 0; i < io->num_blocks; ++i, ++addr.offset) {
			block_off = ftl_band_block_offset_from_addr(breloc->band, addr);

			if (!ftl_band_block_offset_valid(breloc->band, block_off)) {
				io->lba.vector[i] = FTL_LBA_INVALID;
				continue;
			}

			io->lba.vector[i] = breloc->band->lba_map.map[block_off];
		}
	}

	ftl_trace_lba_io_init(io->dev, io);

	return io;
}
/*
 * Submit the move's write IO (weak, vector-LBA, cache-bypassing). On IO
 * allocation failure the move is reset and requeued so it can be retried on a
 * later pass; -ENOMEM is returned in that case.
 */
static int
ftl_reloc_write(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	int io_flags = FTL_IO_WEAK | FTL_IO_VECTOR_LBA | FTL_IO_BYPASS_CACHE;

	/* The IO may already exist when the write is being retried */
	if (spdk_likely(!move->io)) {
		move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_write_cb,
					     FTL_IO_WRITE, io_flags);
		if (!move->io) {
			ftl_reloc_free_move(breloc, move);
			STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
			return -ENOMEM;
		}
	}

	breloc->num_outstanding++;
	ftl_io_write(move->io);
	return 0;
}
/*
 * Pick the next run of blocks to relocate, allocate a bounce buffer and submit
 * the read IO for the move. Returns 0 when there is nothing left to relocate
 * or the read was submitted, -1 on allocation failure (the move is reset and
 * requeued for a retry).
 */
static int
ftl_reloc_read(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	struct ftl_addr addr = {};

	move->num_blocks = ftl_reloc_next_blocks(breloc, &addr);
	move->breloc = breloc;
	move->addr = addr;

	/* The band's iterator is exhausted - nothing left to relocate */
	if (!move->num_blocks) {
		return 0;
	}

	move->data = spdk_dma_malloc(FTL_BLOCK_SIZE * move->num_blocks, 4096, NULL);
	if (!move->data) {
		return -1;
	}

	move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_read_cb, FTL_IO_READ, 0);
	if (!move->io) {
		ftl_reloc_free_move(breloc, move);
		STAILQ_INSERT_TAIL(&breloc->move_queue, move, entry);
		/* Fix: the message was missing its trailing newline, unlike
		 * every other SPDK_ERRLOG in this file */
		SPDK_ERRLOG("Failed to initialize io for relocation.\n");
		return -1;
	}

	breloc->num_outstanding++;
	ftl_io_read(move->io);
	return 0;
}
/*
 * Drain the band's move queue once, dispatching each move according to its
 * state (read data, read LBA map, or write). Moves are processed off a
 * temporary queue so a failed allocation requeues the move for a later pass
 * instead of spinning on it in this one.
 */
static void
ftl_reloc_process_moves(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc_move *move;
	STAILQ_HEAD(, ftl_reloc_move) move_queue;
	int rc = 0;

	/*
	 * When IO allocation fails, we do not want to retry immediately so keep moves on
	 * temporary queue
	 */
	STAILQ_INIT(&move_queue);
	STAILQ_SWAP(&breloc->move_queue, &move_queue, ftl_reloc_move);

	while (!STAILQ_EMPTY(&move_queue)) {
		move = STAILQ_FIRST(&move_queue);
		STAILQ_REMOVE_HEAD(&move_queue, entry);

		switch (move->state) {
		case FTL_RELOC_STATE_READ_LBA_MAP:
			rc = ftl_reloc_read_lba_map(breloc, move);
			break;
		case FTL_RELOC_STATE_READ:
			rc = ftl_reloc_read(breloc, move);
			break;
		case FTL_RELOC_STATE_WRITE:
			rc = ftl_reloc_write(breloc, move);
			break;
		default:
			assert(false);
			break;
		}

		if (rc) {
			SPDK_ERRLOG("Move queue processing failed\n");
			assert(false);
		}
	}
}
/* True once the band has no outstanding IO and no queued moves. */
static bool
ftl_reloc_done(struct ftl_band_reloc *breloc)
{
	return breloc->num_outstanding == 0 && STAILQ_EMPTY(&breloc->move_queue);
}
/*
 * Finish relocating a band: reset the iterator, release the LBA map and move
 * the breloc out of its queue. A band that received new relocation requests
 * mid-flight goes back to the pending queue; an emptied, closed band is freed
 * (and its defrag accounting updated).
 */
static void
ftl_reloc_release(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc *reloc = breloc->parent;
	struct ftl_band *band = breloc->band;

	ftl_reloc_iter_reset(breloc);
	ftl_band_release_lba_map(band);
	reloc->num_active--;

	if (breloc->state == FTL_BAND_RELOC_STATE_HIGH_PRIO) {
		/* High prio band must be relocated as a whole and ANM events will be ignored */
		assert(breloc->num_blocks == 0 && ftl_band_empty(band));
		TAILQ_REMOVE(&reloc->prio_queue, breloc, entry);
		band->high_prio = 0;
		breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;
	} else {
		assert(breloc->state == FTL_BAND_RELOC_STATE_ACTIVE);
		TAILQ_REMOVE(&reloc->active_queue, breloc, entry);
		breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;

		/* If we got ANM event during relocation put such band back to pending queue */
		if (breloc->num_blocks != 0) {
			breloc->state = FTL_BAND_RELOC_STATE_PENDING;
			TAILQ_INSERT_TAIL(&reloc->pending_queue, breloc, entry);
			return;
		}
	}

	if (ftl_band_empty(band) && band->state == FTL_BAND_STATE_CLOSED) {
		ftl_band_set_state(breloc->band, FTL_BAND_STATE_FREE);

		if (breloc->defrag) {
			breloc->defrag = false;
			assert(reloc->num_defrag_bands > 0);
			reloc->num_defrag_bands--;
		}
	}
}
/* Run one processing pass over the band's moves and release the band once all
 * of its relocation work has finished. */
static void
ftl_process_reloc(struct ftl_band_reloc *breloc)
{
	ftl_reloc_process_moves(breloc);

	if (ftl_reloc_done(breloc)) {
		ftl_reloc_release(breloc);
	}
}
/*
 * Initialize per-band relocation state: the reloc bitmap, the per-zone
 * iterator offsets and the move object pool. Returns 0 on success, -1 on
 * allocation failure; partially allocated resources are released by
 * ftl_band_reloc_free() from the caller's error path.
 */
static int
ftl_band_reloc_init(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc,
		    struct ftl_band *band)
{
	breloc->band = band;
	breloc->parent = reloc;

	breloc->reloc_map = spdk_bit_array_create(ftl_get_num_blocks_in_band(reloc->dev));
	if (!breloc->reloc_map) {
		/* Fix: log messages were missing their trailing newline */
		SPDK_ERRLOG("Failed to initialize reloc map\n");
		return -1;
	}

	breloc->iter.zone_offset = calloc(ftl_get_num_punits(band->dev),
					  sizeof(*breloc->iter.zone_offset));
	if (!breloc->iter.zone_offset) {
		SPDK_ERRLOG("Failed to initialize reloc iterator\n");
		return -1;
	}

	STAILQ_INIT(&breloc->move_queue);

	breloc->moves = calloc(reloc->max_qdepth, sizeof(*breloc->moves));
	if (!breloc->moves) {
		/* Previously this failure path was silent, unlike its siblings */
		SPDK_ERRLOG("Failed to allocate reloc moves\n");
		return -1;
	}

	return 0;
}
/* Release all per-band relocation resources.  Called at shutdown; if the band
 * was still mid-relocation its queued moves are returned first, which is only
 * legal once the parent reloc has been halted.
 */
static void
ftl_band_reloc_free(struct ftl_band_reloc *breloc)
{
	struct ftl_reloc_move *move;
	if (!breloc) {
		return;
	}
	assert(breloc->num_outstanding == 0);
	/* Drain write queue if there is active band relocation during shutdown */
	if (breloc->state == FTL_BAND_RELOC_STATE_ACTIVE ||
	    breloc->state == FTL_BAND_RELOC_STATE_HIGH_PRIO) {
		assert(breloc->parent->halt);
		/* NOTE(review): iterating with STAILQ_FOREACH while
		 * ftl_reloc_free_move() runs looks fragile if that helper
		 * unlinks entries from move_queue - confirm it does not,
		 * or switch to a remove-head loop. */
		STAILQ_FOREACH(move, &breloc->move_queue, entry) {
			ftl_reloc_free_move(breloc, move);
		}
	}
	spdk_bit_array_free(&breloc->reloc_map);
	free(breloc->iter.zone_offset);
	free(breloc->moves);
}
/* Allocate and initialize relocation state for the whole device: one
 * ftl_band_reloc per band plus the pending/active/prio scheduling queues.
 * Relocation starts halted; resumed explicitly via ftl_reloc_resume().
 * Returns NULL on failure (everything allocated so far is freed).
 */
struct ftl_reloc *
ftl_reloc_init(struct spdk_ftl_dev *dev)
{
	struct ftl_reloc *reloc;
	size_t i;
	reloc = calloc(1, sizeof(*reloc));
	if (!reloc) {
		return NULL;
	}
	reloc->dev = dev;
	reloc->halt = true;
	reloc->max_qdepth = dev->conf.max_reloc_qdepth;
	reloc->max_active = dev->conf.max_active_relocs;
	reloc->xfer_size = dev->xfer_size;
	reloc->num_defrag_bands = 0;
	/* Per-band move pool is statically bounded by FTL_RELOC_MAX_MOVES */
	if (reloc->max_qdepth > FTL_RELOC_MAX_MOVES) {
		goto error;
	}
	reloc->brelocs = calloc(ftl_get_num_bands(dev), sizeof(*reloc->brelocs));
	if (!reloc->brelocs) {
		goto error;
	}
	for (i = 0; i < ftl_get_num_bands(reloc->dev); ++i) {
		if (ftl_band_reloc_init(reloc, &reloc->brelocs[i], &dev->bands[i])) {
			goto error;
		}
	}
	TAILQ_INIT(&reloc->pending_queue);
	TAILQ_INIT(&reloc->active_queue);
	TAILQ_INIT(&reloc->prio_queue);
	return reloc;
error:
	ftl_reloc_free(reloc);
	return NULL;
}
/* Free the device-wide relocation state, including every per-band context.
 * Safe to call with NULL and with a partially initialized reloc (as done from
 * the ftl_reloc_init() error path).
 *
 * Fix vs original: when ftl_reloc_init() failed before (or during) allocation
 * of the brelocs array, this function iterated over a NULL array -
 * `&reloc->brelocs[i]` with i > 0 produces a bogus non-NULL pointer that
 * ftl_band_reloc_free() then dereferences (undefined behavior / crash).
 */
void
ftl_reloc_free(struct ftl_reloc *reloc)
{
	size_t i;
	if (!reloc) {
		return;
	}
	if (reloc->brelocs) {
		for (i = 0; i < ftl_get_num_bands(reloc->dev); ++i) {
			ftl_band_reloc_free(&reloc->brelocs[i]);
		}
		free(reloc->brelocs);
	}
	free(reloc);
}
/* Report whether background relocation is currently halted. */
bool
ftl_reloc_is_halted(const struct ftl_reloc *reloc)
{
	return reloc->halt;
}
/* Stop scheduling new relocation work; in-flight moves still complete. */
void
ftl_reloc_halt(struct ftl_reloc *reloc)
{
	reloc->halt = true;
}
/* Allow ftl_reloc() to schedule relocation work again. */
void
ftl_reloc_resume(struct ftl_reloc *reloc)
{
	reloc->halt = false;
}
/* Main relocation scheduler, driven from the core poller.  High-priority
 * bands preempt everything else; otherwise closed pending bands are promoted
 * to active (up to max_active) and each active band gets one processing tick.
 * Returns true if any relocation work remains.
 */
bool
ftl_reloc(struct ftl_reloc *reloc)
{
	struct ftl_band_reloc *breloc, *tbreloc;
	if (ftl_reloc_is_halted(reloc)) {
		return false;
	}
	/* Process first band from priority queue and return */
	breloc = TAILQ_FIRST(&reloc->prio_queue);
	if (breloc) {
		ftl_process_reloc(breloc);
		return true;
	}
	/* _SAFE variants: ftl_reloc_prep/ftl_process_reloc may unlink entries */
	TAILQ_FOREACH_SAFE(breloc, &reloc->pending_queue, entry, tbreloc) {
		if (reloc->num_active == reloc->max_active) {
			break;
		}
		/* Wait for band to close before relocating */
		if (breloc->band->state != FTL_BAND_STATE_CLOSED) {
			continue;
		}
		ftl_reloc_prep(breloc);
		assert(breloc->state == FTL_BAND_RELOC_STATE_PENDING);
		TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
		breloc->state = FTL_BAND_RELOC_STATE_ACTIVE;
		TAILQ_INSERT_HEAD(&reloc->active_queue, breloc, entry);
	}
	TAILQ_FOREACH_SAFE(breloc, &reloc->active_queue, entry, tbreloc) {
		assert(breloc->state == FTL_BAND_RELOC_STATE_ACTIVE);
		ftl_process_reloc(breloc);
	}
	return reloc->num_active != 0;
}
/* Schedule num_blocks blocks starting at offset within a band for relocation.
 * prio forces the band onto the priority queue (relocated as a whole);
 * is_defrag marks the request as coming from the defrag path for accounting.
 * Bands with no valid data are freed immediately instead of being queued.
 */
void
ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
	      size_t num_blocks, int prio, bool is_defrag)
{
	struct ftl_band_reloc *breloc = &reloc->brelocs[band->id];
	size_t i;
	/* No need to add anything if already at high prio - whole band should be relocated */
	if (!prio && band->high_prio) {
		return;
	}
	/* num_vld is protected by the LBA map spinlock */
	pthread_spin_lock(&band->lba_map.lock);
	if (band->lba_map.num_vld == 0) {
		pthread_spin_unlock(&band->lba_map.lock);
		/* If the band is closed and has no valid blocks, free it */
		if (band->state == FTL_BAND_STATE_CLOSED) {
			ftl_band_set_state(band, FTL_BAND_STATE_FREE);
		}
		return;
	}
	pthread_spin_unlock(&band->lba_map.lock);
	/* Mark requested blocks; skip ones already scheduled so num_blocks stays accurate */
	for (i = offset; i < offset + num_blocks; ++i) {
		if (spdk_bit_array_get(breloc->reloc_map, i)) {
			continue;
		}
		spdk_bit_array_set(breloc->reloc_map, i);
		breloc->num_blocks++;
	}
	/* If the band is coming from the defrag process, mark it appropriately */
	if (is_defrag) {
		assert(offset == 0 && num_blocks == ftl_get_num_blocks_in_band(band->dev));
		reloc->num_defrag_bands++;
		breloc->defrag = true;
	}
	if (!prio) {
		if (breloc->state == FTL_BAND_RELOC_STATE_INACTIVE) {
			breloc->state = FTL_BAND_RELOC_STATE_PENDING;
			TAILQ_INSERT_HEAD(&reloc->pending_queue, breloc, entry);
		}
	} else {
		bool active = false;
		/* If priority band is already on pending or active queue, remove it from it */
		switch (breloc->state) {
		case FTL_BAND_RELOC_STATE_PENDING:
			TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
			break;
		case FTL_BAND_RELOC_STATE_ACTIVE:
			active = true;
			TAILQ_REMOVE(&reloc->active_queue, breloc, entry);
			break;
		default:
			break;
		}
		breloc->state = FTL_BAND_RELOC_STATE_HIGH_PRIO;
		TAILQ_INSERT_TAIL(&reloc->prio_queue, breloc, entry);
		/*
		 * If band has been already on active queue it doesn't need any additional
		 * resources
		 */
		if (!active) {
			ftl_reloc_prep(breloc);
		}
	}
}

View File

@ -1,25 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#ifndef FTL_RELOC_H
#define FTL_RELOC_H
#include "spdk/stdinc.h"
#include "spdk/ftl.h"
struct ftl_reloc;
struct ftl_band;
/* Allocate relocation state for every band of the device; starts halted */
struct ftl_reloc *ftl_reloc_init(struct spdk_ftl_dev *dev);
void ftl_reloc_free(struct ftl_reloc *reloc);
/* Queue num_blocks blocks at offset of a band for relocation */
void ftl_reloc_add(struct ftl_reloc *reloc, struct ftl_band *band,
		   size_t offset, size_t num_blocks, int prio, bool is_defrag);
/* Scheduler tick; returns true while relocation work remains */
bool ftl_reloc(struct ftl_reloc *reloc);
void ftl_reloc_halt(struct ftl_reloc *reloc);
void ftl_reloc_resume(struct ftl_reloc *reloc);
bool ftl_reloc_is_halted(const struct ftl_reloc *reloc);
bool ftl_reloc_is_defrag_active(const struct ftl_reloc *reloc);
#endif /* FTL_RELOC_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,353 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/trace.h"
#include "ftl_core.h"
#include "ftl_trace.h"
#include "ftl_io.h"
#include "ftl_band.h"
#include "spdk_internal/trace_defs.h"
#if defined(DEBUG)
/* Every FTL tracepoint is registered twice - once for internally generated
 * I/O (GC, metadata) and once for user I/O; the source is encoded in the
 * lowest bit of the tracepoint id.
 */
enum ftl_trace_source {
	FTL_TRACE_SOURCE_INTERNAL,
	FTL_TRACE_SOURCE_USER,
	FTL_TRACE_SOURCE_MAX,
};
/* Compose a per-source tracepoint id within the FTL trace group */
#define FTL_TPOINT_ID(id, src) SPDK_TPOINT_ID(TRACE_GROUP_FTL, (((id) << 1) | (!!(src))))
#define FTL_TRACE_BAND_DEFRAG(src) FTL_TPOINT_ID(0, src)
#define FTL_TRACE_BAND_WRITE(src) FTL_TPOINT_ID(1, src)
#define FTL_TRACE_LIMITS(src) FTL_TPOINT_ID(2, src)
#define FTL_TRACE_WBUF_POP(src) FTL_TPOINT_ID(3, src)
#define FTL_TRACE_READ_SCHEDULE(src) FTL_TPOINT_ID(4, src)
#define FTL_TRACE_READ_SUBMISSION(src) FTL_TPOINT_ID(5, src)
#define FTL_TRACE_READ_COMPLETION_INVALID(src) FTL_TPOINT_ID(6, src)
#define FTL_TRACE_READ_COMPLETION_CACHE(src) FTL_TPOINT_ID(7, src)
#define FTL_TRACE_READ_COMPLETION_DISK(src) FTL_TPOINT_ID(8, src)
#define FTL_TRACE_MD_READ_SCHEDULE(src) FTL_TPOINT_ID(9, src)
#define FTL_TRACE_MD_READ_SUBMISSION(src) FTL_TPOINT_ID(10, src)
#define FTL_TRACE_MD_READ_COMPLETION(src) FTL_TPOINT_ID(11, src)
#define FTL_TRACE_WRITE_SCHEDULE(src) FTL_TPOINT_ID(12, src)
#define FTL_TRACE_WRITE_WBUF_FILL(src) FTL_TPOINT_ID(13, src)
#define FTL_TRACE_WRITE_SUBMISSION(src) FTL_TPOINT_ID(14, src)
#define FTL_TRACE_WRITE_COMPLETION(src) FTL_TPOINT_ID(15, src)
#define FTL_TRACE_MD_WRITE_SCHEDULE(src) FTL_TPOINT_ID(16, src)
#define FTL_TRACE_MD_WRITE_SUBMISSION(src) FTL_TPOINT_ID(17, src)
#define FTL_TRACE_MD_WRITE_COMPLETION(src) FTL_TPOINT_ID(18, src)
#define FTL_TRACE_ERASE_SUBMISSION(src) FTL_TPOINT_ID(19, src)
#define FTL_TRACE_ERASE_COMPLETION(src) FTL_TPOINT_ID(20, src)
/* Register all FTL tracepoint descriptions with the SPDK trace framework.
 * Each tracepoint is registered once per source ('i' = internal, 'u' = user),
 * with the source letter prefixed to the human-readable description.
 */
SPDK_TRACE_REGISTER_FN(ftl_trace_func, "ftl", TRACE_GROUP_FTL)
{
	const char source[] = { 'i', 'u' };
	char descbuf[128];
	int i;
	spdk_trace_register_owner(OWNER_FTL, 'f');
	/* One pass per trace source; descbuf is reused for every description */
	for (i = 0; i < FTL_TRACE_SOURCE_MAX; ++i) {
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "band_defrag");
		spdk_trace_register_description(descbuf, FTL_TRACE_BAND_DEFRAG(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "band");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "band_write");
		spdk_trace_register_description(descbuf, FTL_TRACE_BAND_WRITE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "band");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "limits");
		spdk_trace_register_description(descbuf, FTL_TRACE_LIMITS(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "limits");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "rwb_pop");
		spdk_trace_register_description(descbuf, FTL_TRACE_WBUF_POP(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_sched");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SCHEDULE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_submit");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_SUBMISSION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_read_cmpl");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_READ_COMPLETION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_sched");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SCHEDULE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_submit");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_SUBMISSION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "md_write_cmpl");
		spdk_trace_register_description(descbuf, FTL_TRACE_MD_WRITE_COMPLETION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_sched");
		spdk_trace_register_description(descbuf, FTL_TRACE_READ_SCHEDULE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_submit");
		spdk_trace_register_description(descbuf, FTL_TRACE_READ_SUBMISSION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_cmpl_invld");
		spdk_trace_register_description(descbuf, FTL_TRACE_READ_COMPLETION_INVALID(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_cmpl_cache");
		spdk_trace_register_description(descbuf, FTL_TRACE_READ_COMPLETION_CACHE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "read_cmpl_ssd");
		spdk_trace_register_description(descbuf, FTL_TRACE_READ_COMPLETION_DISK(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_sched");
		spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SCHEDULE(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "rwb_fill");
		spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_WBUF_FILL(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_submit");
		spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_SUBMISSION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "write_cmpl");
		spdk_trace_register_description(descbuf, FTL_TRACE_WRITE_COMPLETION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "lba");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_submit");
		spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_SUBMISSION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
		snprintf(descbuf, sizeof(descbuf), "%c %s", source[i], "erase_cmpl");
		spdk_trace_register_description(descbuf, FTL_TRACE_ERASE_COMPLETION(i),
						OWNER_FTL, OBJECT_NONE, 0,
						SPDK_TRACE_ARG_TYPE_INT, "addr");
	}
}
/* Classify an I/O as internally generated or user-submitted for tracing. */
static uint16_t
ftl_trace_io_source(const struct ftl_io *io)
{
	return (io->flags & FTL_IO_INTERNAL) ? FTL_TRACE_SOURCE_INTERNAL : FTL_TRACE_SOURCE_USER;
}
/* Atomically allocate the next monotonically increasing trace event id. */
static uint64_t
ftl_trace_next_id(struct ftl_trace *trace)
{
	uint64_t id;

	assert(trace->id != FTL_TRACE_INVALID_ID);
	id = __atomic_fetch_add(&trace->id, 1, __ATOMIC_SEQ_CST);
	return id;
}
/* Record that a band was picked for defrag, tagging its valid-block count. */
void
ftl_trace_defrag_band(struct spdk_ftl_dev *dev, const struct ftl_band *band)
{
	uint64_t event_id = ftl_trace_next_id(&dev->stats.trace);

	spdk_trace_record(FTL_TRACE_BAND_DEFRAG(FTL_TRACE_SOURCE_INTERNAL),
			  event_id, 0, band->lba_map.num_vld, band->id);
}
/* Record that a band started being written. */
void
ftl_trace_write_band(struct spdk_ftl_dev *dev, const struct ftl_band *band)
{
	uint64_t event_id = ftl_trace_next_id(&dev->stats.trace);

	spdk_trace_record(FTL_TRACE_BAND_WRITE(FTL_TRACE_SOURCE_INTERNAL),
			  event_id, 0, 0, band->id);
}
/* Record the scheduling event of an LBA-addressed I/O, choosing the
 * tracepoint by data/metadata class, I/O direction and source.
 */
void
ftl_trace_lba_io_init(struct spdk_ftl_dev *dev, const struct ftl_io *io)
{
	uint16_t tpoint_id = 0, source;
	assert(io->trace != FTL_TRACE_INVALID_ID);
	source = ftl_trace_io_source(io);
	if (io->flags & FTL_IO_MD) {
		switch (io->type) {
		case FTL_IO_READ:
			tpoint_id = FTL_TRACE_MD_READ_SCHEDULE(source);
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_MD_WRITE_SCHEDULE(source);
			break;
		default:
			/* Erase is never scheduled through this path */
			assert(0);
		}
	} else {
		switch (io->type) {
		case FTL_IO_READ:
			tpoint_id = FTL_TRACE_READ_SCHEDULE(source);
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_WRITE_SCHEDULE(source);
			break;
		default:
			assert(0);
		}
	}
	spdk_trace_record(tpoint_id, io->trace, io->num_blocks, 0, ftl_io_get_lba(io, 0));
}
/* Record placing the I/O's current block into the write buffer. */
void
ftl_trace_wbuf_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io)
{
	uint16_t tpoint_id;

	assert(io->trace != FTL_TRACE_INVALID_ID);
	tpoint_id = FTL_TRACE_WRITE_WBUF_FILL(ftl_trace_io_source(io));
	spdk_trace_record(tpoint_id, io->trace, 0, 0, ftl_io_current_lba(io));
}
/* Record draining a write buffer entry, distinguishing internal vs user I/O. */
void
ftl_trace_wbuf_pop(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry)
{
	uint16_t tpoint_id;

	assert(entry->trace != FTL_TRACE_INVALID_ID);

	tpoint_id = (entry->io_flags & FTL_IO_INTERNAL) ?
		    FTL_TRACE_WBUF_POP(FTL_TRACE_SOURCE_INTERNAL) :
		    FTL_TRACE_WBUF_POP(FTL_TRACE_SOURCE_USER);

	spdk_trace_record(tpoint_id, entry->trace, 0, entry->addr.offset, entry->lba);
}
/* Record an I/O completion.  For data reads the caller also distinguishes
 * where the data came from (invalid mapping, write-buffer cache, or disk).
 */
void
ftl_trace_completion(struct spdk_ftl_dev *dev, const struct ftl_io *io,
		     enum ftl_trace_completion completion)
{
	uint16_t tpoint_id = 0, source;
	assert(io->trace != FTL_TRACE_INVALID_ID);
	source = ftl_trace_io_source(io);
	if (io->flags & FTL_IO_MD) {
		switch (io->type) {
		case FTL_IO_READ:
			tpoint_id = FTL_TRACE_MD_READ_COMPLETION(source);
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_MD_WRITE_COMPLETION(source);
			break;
		default:
			assert(0);
		}
	} else {
		switch (io->type) {
		case FTL_IO_READ:
			/* completion argument is only meaningful for data reads */
			switch (completion) {
			case FTL_TRACE_COMPLETION_INVALID:
				tpoint_id = FTL_TRACE_READ_COMPLETION_INVALID(source);
				break;
			case FTL_TRACE_COMPLETION_CACHE:
				tpoint_id = FTL_TRACE_READ_COMPLETION_CACHE(source);
				break;
			case FTL_TRACE_COMPLETION_DISK:
				tpoint_id = FTL_TRACE_READ_COMPLETION_DISK(source);
				break;
			}
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_WRITE_COMPLETION(source);
			break;
		case FTL_IO_ERASE:
			tpoint_id = FTL_TRACE_ERASE_COMPLETION(source);
			break;
		default:
			assert(0);
		}
	}
	/* io->pos already advanced past the last block; log the final LBA */
	spdk_trace_record(tpoint_id, io->trace, 0, 0, ftl_io_get_lba(io, io->pos - 1));
}
/* Record submission of (part of) an I/O to the underlying device at the
 * given physical address; addr_cnt is the number of blocks submitted.
 */
void
ftl_trace_submission(struct spdk_ftl_dev *dev, const struct ftl_io *io, struct ftl_addr addr,
		     size_t addr_cnt)
{
	uint16_t tpoint_id = 0, source;
	assert(io->trace != FTL_TRACE_INVALID_ID);
	source = ftl_trace_io_source(io);
	if (io->flags & FTL_IO_MD) {
		switch (io->type) {
		case FTL_IO_READ:
			tpoint_id = FTL_TRACE_MD_READ_SUBMISSION(source);
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_MD_WRITE_SUBMISSION(source);
			break;
		default:
			assert(0);
		}
	} else {
		switch (io->type) {
		case FTL_IO_READ:
			tpoint_id = FTL_TRACE_READ_SUBMISSION(source);
			break;
		case FTL_IO_WRITE:
			tpoint_id = FTL_TRACE_WRITE_SUBMISSION(source);
			break;
		case FTL_IO_ERASE:
			tpoint_id = FTL_TRACE_ERASE_SUBMISSION(source);
			break;
		default:
			assert(0);
		}
	}
	spdk_trace_record(tpoint_id, io->trace, addr_cnt, 0, addr.offset);
}
/* Record a change of the applied write limit together with the free-band count. */
void
ftl_trace_limits(struct spdk_ftl_dev *dev, int limit, size_t num_free)
{
	uint64_t event_id = ftl_trace_next_id(&dev->stats.trace);

	spdk_trace_record(FTL_TRACE_LIMITS(FTL_TRACE_SOURCE_INTERNAL), event_id,
			  num_free, limit, 0);
}
/* Hand out a fresh trace id for a new I/O. */
uint64_t
ftl_trace_alloc_id(struct spdk_ftl_dev *dev)
{
	return ftl_trace_next_id(&dev->stats.trace);
}
#endif /* defined(DEBUG) */

View File

@ -1,56 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#ifndef FTL_TRACE_H
#define FTL_TRACE_H
#include "ftl_addr.h"
/* Sentinel for an I/O that has not been assigned a trace id */
#define FTL_TRACE_INVALID_ID ((uint64_t) -1)
/* Where a completed read was satisfied from */
enum ftl_trace_completion {
	FTL_TRACE_COMPLETION_INVALID,
	FTL_TRACE_COMPLETION_CACHE,
	FTL_TRACE_COMPLETION_DISK,
};
struct ftl_trace {
	/* Monotonically incrementing event id */
	uint64_t id;
};
struct spdk_ftl_dev;
struct ftl_trace;
struct ftl_io;
struct ftl_wbuf_entry;
struct ftl_band;
/* Tracing is compiled in only for DEBUG builds; release builds get no-op macros */
#if defined(DEBUG)
uint64_t ftl_trace_alloc_id(struct spdk_ftl_dev *dev);
void ftl_trace_defrag_band(struct spdk_ftl_dev *dev, const struct ftl_band *band);
void ftl_trace_write_band(struct spdk_ftl_dev *dev, const struct ftl_band *band);
void ftl_trace_lba_io_init(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_wbuf_fill(struct spdk_ftl_dev *dev, const struct ftl_io *io);
void ftl_trace_wbuf_pop(struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry);
void ftl_trace_submission(struct spdk_ftl_dev *dev,
			  const struct ftl_io *io,
			  struct ftl_addr addr, size_t addr_cnt);
void ftl_trace_completion(struct spdk_ftl_dev *dev,
			  const struct ftl_io *io,
			  enum ftl_trace_completion type);
void ftl_trace_limits(struct spdk_ftl_dev *dev, int limit, size_t num_free);
#else /* defined(DEBUG) */
#define ftl_trace_alloc_id(dev) FTL_TRACE_INVALID_ID
#define ftl_trace_defrag_band(dev, band)
#define ftl_trace_write_band(dev, band)
#define ftl_trace_lba_io_init(dev, io)
#define ftl_trace_wbuf_fill(dev, io)
#define ftl_trace_wbuf_pop(dev, entry)
#define ftl_trace_submission(dev, io, addr, addr_cnt)
#define ftl_trace_completion(dev, io, type)
#define ftl_trace_limits(dev, limits, num_free)
#endif
#endif /* FTL_TRACE_H */

View File

@ -1,14 +0,0 @@
{
global:
# public functions
spdk_ftl_dev_init;
spdk_ftl_dev_free;
spdk_ftl_conf_init_defaults;
spdk_ftl_dev_get_attrs;
spdk_ftl_read;
spdk_ftl_write;
spdk_ftl_flush;
local: *;
};

View File

@ -132,9 +132,6 @@ DEPDIRS-bdev_rbd := $(BDEV_DEPS_THREAD)
DEPDIRS-bdev_uring := $(BDEV_DEPS_THREAD)
DEPDIRS-bdev_virtio := $(BDEV_DEPS_THREAD) virtio
DEPDIRS-bdev_zone_block := $(BDEV_DEPS_THREAD)
ifeq ($(OS),Linux)
DEPDIRS-bdev_ftl := $(BDEV_DEPS_THREAD) ftl
endif
# module/event

View File

@ -47,7 +47,6 @@ endif
endif
ifeq ($(OS),Linux)
BLOCKDEV_MODULES_LIST += bdev_ftl ftl
BLOCKDEV_MODULES_LIST += bdev_aio
BLOCKDEV_MODULES_PRIVATE_LIBS += -laio
INTR_BLOCKDEV_MODULES_LIST += bdev_aio

View File

@ -17,7 +17,7 @@ DIRS-$(CONFIG_REDUCE) += compress
DIRS-$(CONFIG_URING) += uring
ifeq ($(OS),Linux)
DIRS-y += aio ftl
DIRS-y += aio
DIRS-$(CONFIG_ISCSI_INITIATOR) += iscsi
DIRS-$(CONFIG_VIRTIO) += virtio
DIRS-$(CONFIG_PMDK) += pmem

View File

@ -1,17 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
SO_VER := 4
SO_MINOR := 0
C_SRCS += bdev_ftl.c bdev_ftl_rpc.c
LIBNAME = bdev_ftl
SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk

View File

@ -1,485 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk/bdev.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"
#include "spdk/ftl.h"
#include "spdk/log.h"
#include "bdev_ftl.h"
/* Bdev exposing an spdk_ftl_dev instance through the generic bdev layer */
struct ftl_bdev {
	struct spdk_bdev bdev;
	struct spdk_ftl_dev *dev;
	/* User callback + context invoked once device creation completes */
	ftl_bdev_init_fn init_cb;
	void *init_arg;
};
/* Creation request parked until its base/cache bdevs show up (see examine) */
struct ftl_deferred_init {
	struct ftl_bdev_init_opts opts;
	LIST_ENTRY(ftl_deferred_init) entry;
};
/* Deferred creations waiting for their base/cache bdev; retried from examine */
static LIST_HEAD(, ftl_deferred_init) g_deferred_init = LIST_HEAD_INITIALIZER(g_deferred_init);
static int bdev_ftl_initialize(void);
static void bdev_ftl_finish(void);
static void bdev_ftl_examine(struct spdk_bdev *bdev);
/* Module descriptor hooking FTL into the bdev module framework */
static struct spdk_bdev_module g_ftl_if = {
	.name = "ftl",
	.module_init = bdev_ftl_initialize,
	.module_fini = bdev_ftl_finish,
	.examine_disk = bdev_ftl_examine,
};
SPDK_BDEV_MODULE_REGISTER(ftl, &g_ftl_if)
/* Completion of spdk_ftl_dev_free(): finish the asynchronous bdev destruct
 * and release the wrapper allocated in bdev_ftl_create_bdev().
 */
static void
bdev_ftl_free_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
{
	struct ftl_bdev *fbdev = ctx;

	spdk_bdev_destruct_done(&fbdev->bdev, status);
	free(fbdev->bdev.name);
	free(fbdev);
}
/* bdev destruct hook: kick off asynchronous teardown of the FTL device. */
static int
bdev_ftl_destruct(void *ctx)
{
	struct ftl_bdev *fbdev = ctx;

	spdk_ftl_dev_free(fbdev->dev, bdev_ftl_free_cb, fbdev);

	/* Teardown finishes in bdev_ftl_free_cb(); report async destruct */
	return 1;
}
/* Generic FTL I/O completion: translate the library's negative errno into a
 * bdev status and complete the bdev_io.
 */
static void
bdev_ftl_cb(void *arg, int rc)
{
	struct spdk_bdev_io *bdev_io = arg;
	enum spdk_bdev_io_status status;

	if (rc == 0) {
		status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (rc == -ENOMEM) {
		/* NOMEM lets the bdev layer retry the I/O later */
		status = SPDK_BDEV_IO_STATUS_NOMEM;
	} else {
		status = SPDK_BDEV_IO_STATUS_FAILED;
	}

	spdk_bdev_io_complete(bdev_io, status);
}
/* Buffer-allocation callback for reads: once the bdev layer has provided the
 * data buffers, submit the read to the FTL library.
 *
 * Fix vs original: on submission failure the negative errno `rc` was passed
 * directly to spdk_bdev_io_complete(), which expects an
 * enum spdk_bdev_io_status - route it through bdev_ftl_cb() instead so
 * -ENOMEM maps to NOMEM (retriable) and everything else to FAILED.
 */
static void
bdev_ftl_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		    bool success)
{
	struct ftl_bdev *ftl_bdev;
	int rc;
	ftl_bdev = bdev_io->bdev->ctxt;
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	rc = spdk_ftl_read(ftl_bdev->dev,
			   ch,
			   bdev_io->u.bdev.offset_blocks,
			   bdev_io->u.bdev.num_blocks,
			   bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_ftl_cb, bdev_io);
	if (spdk_unlikely(rc != 0)) {
		bdev_ftl_cb(bdev_io, rc);
	}
}
/* Dispatch a bdev_io to the FTL library.  Returns 0 on successful submission
 * (completion arrives via bdev_ftl_cb) or a negative errno; -ENOTSUP for I/O
 * types FTL does not implement.
 *
 * Fix vs original: removed the unreachable `break` that followed
 * `return -ENOTSUP;` in the default case.
 */
static int
_bdev_ftl_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct ftl_bdev *ftl_bdev = (struct ftl_bdev *)bdev_io->bdev->ctxt;
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		/* Reads need data buffers allocated first */
		spdk_bdev_io_get_buf(bdev_io, bdev_ftl_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		return spdk_ftl_write(ftl_bdev->dev, ch, bdev_io->u.bdev.offset_blocks,
				      bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.iovs,
				      bdev_io->u.bdev.iovcnt, bdev_ftl_cb, bdev_io);
	case SPDK_BDEV_IO_TYPE_FLUSH:
		return spdk_ftl_flush(ftl_bdev->dev, bdev_ftl_cb, bdev_io);
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	default:
		return -ENOTSUP;
	}
}
/* bdev submit hook: forward to _bdev_ftl_submit_request() and complete the
 * I/O immediately on synchronous failure.
 *
 * Fix vs original: the negative errno `rc` was passed directly to
 * spdk_bdev_io_complete(), which expects an enum spdk_bdev_io_status -
 * route it through bdev_ftl_cb() so -ENOMEM maps to NOMEM and other errors
 * to FAILED.
 */
static void
bdev_ftl_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	int rc = _bdev_ftl_submit_request(ch, bdev_io);
	if (spdk_unlikely(rc != 0)) {
		bdev_ftl_cb(bdev_io, rc);
	}
}
/* FTL bdevs support reads, writes and flushes only. */
static bool
bdev_ftl_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	return io_type == SPDK_BDEV_IO_TYPE_READ ||
	       io_type == SPDK_BDEV_IO_TYPE_WRITE ||
	       io_type == SPDK_BDEV_IO_TYPE_FLUSH;
}
/* I/O channels are owned by the underlying FTL device, not the bdev wrapper. */
static struct spdk_io_channel *
bdev_ftl_get_io_channel(void *ctx)
{
	struct ftl_bdev *fbdev = ctx;

	return spdk_get_io_channel(fbdev->dev);
}
static void
_bdev_ftl_write_config_info(struct ftl_bdev *ftl_bdev, struct spdk_json_write_ctx *w)
{
struct spdk_ftl_attrs attrs = {};
spdk_ftl_dev_get_attrs(ftl_bdev->dev, &attrs);
spdk_json_write_named_string(w, "base_bdev", attrs.base_bdev);
if (attrs.cache_bdev) {
spdk_json_write_named_string(w, "cache", attrs.cache_bdev);
}
}
/* Serialize this bdev as a "bdev_ftl_create" RPC call so the configuration
 * can be replayed on the next application start.
 */
static void
bdev_ftl_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	struct ftl_bdev *ftl_bdev = bdev->ctxt;
	struct spdk_ftl_attrs attrs;
	struct spdk_ftl_conf *conf = &attrs.conf;
	char uuid[SPDK_UUID_STRING_LEN];
	spdk_ftl_dev_get_attrs(ftl_bdev->dev, &attrs);
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "bdev_ftl_create");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", ftl_bdev->bdev.name);
	spdk_json_write_named_bool(w, "allow_open_bands", conf->allow_open_bands);
	spdk_json_write_named_uint64(w, "overprovisioning", conf->lba_rsvd);
	spdk_json_write_named_uint64(w, "limit_crit", conf->limits[SPDK_FTL_LIMIT_CRIT].limit);
	spdk_json_write_named_uint64(w, "limit_crit_threshold", conf->limits[SPDK_FTL_LIMIT_CRIT].thld);
	spdk_json_write_named_uint64(w, "limit_high", conf->limits[SPDK_FTL_LIMIT_HIGH].limit);
	spdk_json_write_named_uint64(w, "limit_high_threshold", conf->limits[SPDK_FTL_LIMIT_HIGH].thld);
	spdk_json_write_named_uint64(w, "limit_low", conf->limits[SPDK_FTL_LIMIT_LOW].limit);
	spdk_json_write_named_uint64(w, "limit_low_threshold", conf->limits[SPDK_FTL_LIMIT_LOW].thld);
	spdk_json_write_named_uint64(w, "limit_start", conf->limits[SPDK_FTL_LIMIT_START].limit);
	spdk_json_write_named_uint64(w, "limit_start_threshold", conf->limits[SPDK_FTL_LIMIT_START].thld);
	/* Optional non-volatile L2P backing file */
	if (conf->l2p_path) {
		spdk_json_write_named_string(w, "l2p_path", conf->l2p_path);
	}
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &attrs.uuid);
	spdk_json_write_named_string(w, "uuid", uuid);
	_bdev_ftl_write_config_info(ftl_bdev, w);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
static int
bdev_ftl_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
struct ftl_bdev *ftl_bdev = ctx;
struct spdk_ftl_attrs attrs;
spdk_ftl_dev_get_attrs(ftl_bdev->dev, &attrs);
spdk_json_write_named_object_begin(w, "ftl");
_bdev_ftl_write_config_info(ftl_bdev, w);
spdk_json_write_named_string_fmt(w, "num_zones", "%zu", attrs.num_zones);
spdk_json_write_named_string_fmt(w, "zone_size", "%zu", attrs.zone_size);
/* ftl */
spdk_json_write_object_end(w);
return 0;
}
/* Function table hooking the FTL device into the generic bdev layer */
static const struct spdk_bdev_fn_table ftl_fn_table = {
	.destruct = bdev_ftl_destruct,
	.submit_request = bdev_ftl_submit_request,
	.io_type_supported = bdev_ftl_io_type_supported,
	.get_io_channel = bdev_ftl_get_io_channel,
	.write_config_json = bdev_ftl_write_config_json,
	.dump_info_json = bdev_ftl_dump_info_json,
};
/* Completion of spdk_ftl_dev_init(): fill in the bdev descriptor from the
 * device attributes, register it and notify the creator.  On any failure the
 * wrapper is freed and the user callback gets a NULL info with an error code.
 */
static void
bdev_ftl_create_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
{
	struct ftl_bdev *ftl_bdev = ctx;
	struct ftl_bdev_info info = {};
	struct spdk_ftl_attrs attrs;
	ftl_bdev_init_fn init_cb = ftl_bdev->init_cb;
	void *init_arg = ftl_bdev->init_arg;
	int rc = -ENODEV;
	if (status) {
		SPDK_ERRLOG("Failed to create FTL device (%d)\n", status);
		rc = status;
		goto error;
	}
	spdk_ftl_dev_get_attrs(dev, &attrs);
	ftl_bdev->dev = dev;
	ftl_bdev->bdev.product_name = "FTL disk";
	ftl_bdev->bdev.write_cache = 0;
	ftl_bdev->bdev.blocklen = attrs.block_size;
	ftl_bdev->bdev.blockcnt = attrs.num_blocks;
	/* Reuse the device UUID so the bdev can be identified across restarts */
	ftl_bdev->bdev.uuid = attrs.uuid;
	SPDK_DEBUGLOG(bdev_ftl, "Creating bdev %s:\n", ftl_bdev->bdev.name);
	SPDK_DEBUGLOG(bdev_ftl, "\tblock_len:\t%zu\n", attrs.block_size);
	SPDK_DEBUGLOG(bdev_ftl, "\tnum_blocks:\t%"PRIu64"\n", attrs.num_blocks);
	ftl_bdev->bdev.ctxt = ftl_bdev;
	ftl_bdev->bdev.fn_table = &ftl_fn_table;
	ftl_bdev->bdev.module = &g_ftl_if;
	if (spdk_bdev_register(&ftl_bdev->bdev)) {
		goto error;
	}
	info.name = ftl_bdev->bdev.name;
	info.uuid = ftl_bdev->bdev.uuid;
	init_cb(&info, init_arg, 0);
	return;
error:
	free(ftl_bdev->bdev.name);
	free(ftl_bdev);
	init_cb(NULL, init_arg, rc);
}
/* Release a deferred-init descriptor together with its duplicated names. */
static void
bdev_ftl_defer_free(struct ftl_deferred_init *init)
{
	/* The names were strdup()ed in bdev_ftl_defer_init(); the casts drop
	 * the const qualifier of the opts fields for free(). */
	free((char *)init->opts.name);
	free((char *)init->opts.base_bdev);
	free((char *)init->opts.cache_bdev);

	free(init);
}
/* Park a creation request whose base or cache bdev is not available yet.
 * Deep-copies all name strings; the request is retried from
 * bdev_ftl_examine() when new bdevs appear.  Returns 0 or -ENOMEM.
 */
static int
bdev_ftl_defer_init(const struct ftl_bdev_init_opts *opts)
{
	struct ftl_deferred_init *init;
	init = calloc(1, sizeof(*init));
	if (!init) {
		return -ENOMEM;
	}
	init->opts.mode = opts->mode;
	init->opts.uuid = opts->uuid;
	init->opts.ftl_conf = opts->ftl_conf;
	init->opts.name = strdup(opts->name);
	if (!init->opts.name) {
		SPDK_ERRLOG("Could not allocate bdev name\n");
		goto error;
	}
	init->opts.base_bdev = strdup(opts->base_bdev);
	if (!init->opts.base_bdev) {
		SPDK_ERRLOG("Could not allocate base bdev name\n");
		goto error;
	}
	/* Cache bdev is optional */
	if (opts->cache_bdev) {
		init->opts.cache_bdev = strdup(opts->cache_bdev);
		if (!init->opts.cache_bdev) {
			SPDK_ERRLOG("Could not allocate cache bdev name\n");
			goto error;
		}
	}
	LIST_INSERT_HEAD(&g_deferred_init, init, entry);
	return 0;
error:
	bdev_ftl_defer_free(init);
	return -ENOMEM;
}
/* Create an FTL bdev on top of the given base (and optional cache) bdev.
 * If either backing bdev does not exist yet the request is deferred and
 * -ENODEV is returned; otherwise device initialization is started and `cb`
 * fires on completion.  Returns 0 on successful submission, negative errno
 * otherwise.
 */
int
bdev_ftl_create_bdev(const struct ftl_bdev_init_opts *bdev_opts,
		     ftl_bdev_init_fn cb, void *cb_arg)
{
	struct ftl_bdev *ftl_bdev = NULL;
	struct spdk_ftl_dev_init_opts opts = {};
	int rc;
	ftl_bdev = calloc(1, sizeof(*ftl_bdev));
	if (!ftl_bdev) {
		SPDK_ERRLOG("Could not allocate ftl_bdev\n");
		return -ENOMEM;
	}
	ftl_bdev->bdev.name = strdup(bdev_opts->name);
	if (!ftl_bdev->bdev.name) {
		rc = -ENOMEM;
		goto error_bdev;
	}
	/* Defer creation until both backing bdevs are registered */
	if (spdk_bdev_get_by_name(bdev_opts->base_bdev) == NULL ||
	    (bdev_opts->cache_bdev && spdk_bdev_get_by_name(bdev_opts->cache_bdev) == NULL)) {
		rc = bdev_ftl_defer_init(bdev_opts);
		if (rc == 0) {
			rc = -ENODEV;
		}
		goto error_name;
	}
	ftl_bdev->init_cb = cb;
	ftl_bdev->init_arg = cb_arg;
	opts.mode = bdev_opts->mode;
	opts.uuid = bdev_opts->uuid;
	opts.name = ftl_bdev->bdev.name;
	opts.base_bdev = bdev_opts->base_bdev;
	opts.cache_bdev = bdev_opts->cache_bdev;
	opts.conf = &bdev_opts->ftl_conf;
	/* TODO: set threads based on config */
	opts.core_thread = spdk_get_thread();
	rc = spdk_ftl_dev_init(&opts, bdev_ftl_create_cb, ftl_bdev);
	if (rc) {
		SPDK_ERRLOG("Could not create FTL device\n");
		goto error_name;
	}
	return 0;
error_name:
	free(ftl_bdev->bdev.name);
error_bdev:
	free(ftl_bdev);
	return rc;
}
/* Module init hook - the FTL module has no global state to set up. */
static int
bdev_ftl_initialize(void)
{
	return 0;
}
/* Unregister an FTL bdev by name; cb_fn always fires, directly on lookup
 * failure or via the unregister path otherwise.
 */
void
bdev_ftl_delete_bdev(const char *name, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc = spdk_bdev_unregister_by_name(name, &g_ftl_if, cb_fn, cb_arg);

	if (rc != 0) {
		cb_fn(cb_arg, rc);
	}
}
/* Module teardown hook - nothing module-global to release. */
static void
bdev_ftl_finish(void)
{
}
/* Completion of a deferred creation kicked off from examine: log failures,
 * free the deferred descriptor and let the bdev layer continue examining.
 */
static void
bdev_ftl_create_deferred_cb(const struct ftl_bdev_info *info, void *ctx, int status)
{
	struct ftl_deferred_init *deferred = ctx;

	if (status != 0) {
		SPDK_ERRLOG("Failed to initialize FTL bdev '%s'\n", deferred->opts.name);
	}

	bdev_ftl_defer_free(deferred);

	spdk_bdev_module_examine_done(&g_ftl_if);
}
/* examine_disk hook: whenever a new bdev appears, retry deferred FTL
 * creations whose backing bdevs are now all present.  At most one deferred
 * creation is started per examine; examine_done is then signalled from
 * bdev_ftl_create_deferred_cb().
 *
 * Fix vs original: the cache-bdev presence check looked up
 * opts->opts.base_bdev again (copy-paste) instead of opts->opts.cache_bdev,
 * so a missing cache bdev could not keep the request deferred.
 */
static void
bdev_ftl_examine(struct spdk_bdev *bdev)
{
	struct ftl_deferred_init *opts;
	LIST_FOREACH(opts, &g_deferred_init, entry) {
		if (spdk_bdev_get_by_name(opts->opts.base_bdev) == NULL) {
			continue;
		}
		if (opts->opts.cache_bdev && spdk_bdev_get_by_name(opts->opts.cache_bdev) == NULL) {
			continue;
		}
		LIST_REMOVE(opts, entry);
		/* spdk_bdev_module_examine_done will be called by bdev_ftl_create_deferred_cb */
		if (bdev_ftl_create_bdev(&opts->opts, bdev_ftl_create_deferred_cb, opts)) {
			SPDK_ERRLOG("Failed to initialize FTL bdev '%s'\n", opts->opts.name);
			bdev_ftl_defer_free(opts);
			break;
		}
		return;
	}
	spdk_bdev_module_examine_done(&g_ftl_if);
}
SPDK_LOG_REGISTER_COMPONENT(bdev_ftl)

View File

@ -1,42 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#ifndef SPDK_BDEV_FTL_H
#define SPDK_BDEV_FTL_H
#include "spdk/stdinc.h"
#include "spdk/bdev_module.h"
#include "spdk/ftl.h"
struct spdk_bdev;
struct spdk_uuid;
struct ftl_bdev_info {
const char *name;
struct spdk_uuid uuid;
};
struct ftl_bdev_init_opts {
/* Bdev's name */
const char *name;
/* Base bdev's name */
const char *base_bdev;
/* Write buffer bdev's name */
const char *cache_bdev;
/* Bdev's mode */
uint32_t mode;
/* UUID if device is restored from SSD */
struct spdk_uuid uuid;
/* FTL library configuration */
struct spdk_ftl_conf ftl_conf;
};
typedef void (*ftl_bdev_init_fn)(const struct ftl_bdev_info *, void *, int);
int bdev_ftl_create_bdev(const struct ftl_bdev_init_opts *bdev_opts,
ftl_bdev_init_fn cb, void *cb_arg);
void bdev_ftl_delete_bdev(const char *name, spdk_bdev_unregister_cb cb_fn, void *cb_arg);
#endif /* SPDK_BDEV_FTL_H */

View File

@ -1,230 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/bdev_module.h"
#include "spdk/string.h"
#include "spdk/log.h"

#include "bdev_ftl.h"

/* Decoded parameters of the bdev_ftl_create RPC. All char* members (including
 * ftl_conf.l2p_path) are heap-allocated by the JSON decoder and released by
 * free_rpc_bdev_ftl_create(). */
struct rpc_bdev_ftl_create {
	char *name;
	char *base_bdev;
	char *uuid;
	char *cache_bdev;
	struct spdk_ftl_conf ftl_conf;
};
/* Release every decoder-allocated string held by a bdev_ftl_create request.
 * free(NULL) is a no-op, so optional fields need no guards.
 */
static void
free_rpc_bdev_ftl_create(struct rpc_bdev_ftl_create *req)
{
	free((char *)req->ftl_conf.l2p_path);
	free(req->cache_bdev);
	free(req->uuid);
	free(req->base_bdev);
	free(req->name);
}
/* JSON decoder table for bdev_ftl_create. Nested spdk_ftl_conf members are
 * addressed by summing the offset of ftl_conf with the member's offset inside
 * struct spdk_ftl_conf; entries with a trailing 'true' are optional. */
static const struct spdk_json_object_decoder rpc_bdev_ftl_create_decoders[] = {
	{"name", offsetof(struct rpc_bdev_ftl_create, name), spdk_json_decode_string},
	{"base_bdev", offsetof(struct rpc_bdev_ftl_create, base_bdev), spdk_json_decode_string},
	{"uuid", offsetof(struct rpc_bdev_ftl_create, uuid), spdk_json_decode_string, true},
	{"cache", offsetof(struct rpc_bdev_ftl_create, cache_bdev), spdk_json_decode_string, true},
	{
		"allow_open_bands", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, allow_open_bands), spdk_json_decode_bool, true
	},
	/* "overprovisioning" maps onto lba_rsvd (reserved LBA percentage) */
	{
		"overprovisioning", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, lba_rsvd), spdk_json_decode_uint64, true
	},
	{
		"use_append", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, use_append), spdk_json_decode_bool, true
	},
	{
		"l2p_path", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, l2p_path),
		spdk_json_decode_string, true
	},
	/* Per-threshold defrag limit pairs: <level> and <level>_threshold map to
	 * limits[<LEVEL>].limit and limits[<LEVEL>].thld respectively. */
	{
		"limit_crit", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_CRIT]) +
		offsetof(struct spdk_ftl_limit, limit),
		spdk_json_decode_uint64, true
	},
	{
		"limit_crit_threshold", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_CRIT]) +
		offsetof(struct spdk_ftl_limit, thld),
		spdk_json_decode_uint64, true
	},
	{
		"limit_high", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_HIGH]) +
		offsetof(struct spdk_ftl_limit, limit),
		spdk_json_decode_uint64, true
	},
	{
		"limit_high_threshold", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_HIGH]) +
		offsetof(struct spdk_ftl_limit, thld),
		spdk_json_decode_uint64, true
	},
	{
		"limit_low", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_LOW]) +
		offsetof(struct spdk_ftl_limit, limit),
		spdk_json_decode_uint64, true
	},
	{
		"limit_low_threshold", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_LOW]) +
		offsetof(struct spdk_ftl_limit, thld),
		spdk_json_decode_uint64, true
	},
	{
		"limit_start", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_START]) +
		offsetof(struct spdk_ftl_limit, limit),
		spdk_json_decode_uint64, true
	},
	{
		"limit_start_threshold", offsetof(struct rpc_bdev_ftl_create, ftl_conf) +
		offsetof(struct spdk_ftl_conf, limits[SPDK_FTL_LIMIT_START]) +
		offsetof(struct spdk_ftl_limit, thld),
		spdk_json_decode_uint64, true
	},
};
/* bdev_ftl_create completion: on failure reply with a JSON-RPC error, on
 * success reply with a {name, uuid} object describing the new bdev.
 */
static void
rpc_bdev_ftl_create_cb(const struct ftl_bdev_info *bdev_info, void *ctx, int status)
{
	struct spdk_jsonrpc_request *request = ctx;
	struct spdk_json_write_ctx *w;
	char uuid_str[SPDK_UUID_STRING_LEN];

	if (status != 0) {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "Failed to create FTL bdev: %s",
						     spdk_strerror(-status));
		return;
	}

	spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &bdev_info->uuid);

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", bdev_info->name);
	spdk_json_write_named_string(w, "uuid", uuid_str);
	spdk_json_write_object_end(w);
	spdk_jsonrpc_end_result(request, w);
}
/* Handler for the bdev_ftl_create RPC: decode parameters, build init options
 * and start asynchronous bdev creation. The JSON-RPC response is sent either
 * here (decode/validation errors, deferred creation) or from
 * rpc_bdev_ftl_create_cb(). The decoded request strings are always freed on
 * exit via the shared 'invalid' label (success path falls through to it too);
 * bdev_ftl_create_bdev() is therefore assumed to copy what it needs —
 * NOTE(review): confirm against its implementation.
 */
static void
rpc_bdev_ftl_create(struct spdk_jsonrpc_request *request,
		    const struct spdk_json_val *params)
{
	struct rpc_bdev_ftl_create req = {};
	struct ftl_bdev_init_opts opts = {};
	struct spdk_json_write_ctx *w;
	int rc;

	/* Fill defaults first so omitted optional JSON keys keep sane values */
	spdk_ftl_conf_init_defaults(&req.ftl_conf);

	if (spdk_json_decode_object(params, rpc_bdev_ftl_create_decoders,
				    SPDK_COUNTOF(rpc_bdev_ftl_create_decoders),
				    &req)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters");
		goto invalid;
	}

	/* The cache bdev (unlike the base bdev) must already exist */
	if (req.cache_bdev && !spdk_bdev_get_by_name(req.cache_bdev)) {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						     "No such bdev: %s", req.cache_bdev);
		goto invalid;
	}

	opts.name = req.name;
	opts.mode = SPDK_FTL_MODE_CREATE;
	opts.base_bdev = req.base_bdev;
	opts.cache_bdev = req.cache_bdev;
	opts.ftl_conf = req.ftl_conf;

	if (req.uuid) {
		if (spdk_uuid_parse(&opts.uuid, req.uuid) < 0) {
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
							     "Failed to parse uuid: %s",
							     req.uuid);
			goto invalid;
		}

		/* A non-zero UUID means "restore existing device", not create */
		if (!spdk_mem_all_zero(&opts.uuid, sizeof(opts.uuid))) {
			opts.mode &= ~SPDK_FTL_MODE_CREATE;
		}
	}

	rc = bdev_ftl_create_bdev(&opts, rpc_bdev_ftl_create_cb, request);
	if (rc) {
		if (rc == -ENODEV) {
			/* Dependencies not registered yet — creation was deferred to examine() */
			w = spdk_jsonrpc_begin_result(request);
			spdk_json_write_string_fmt(w, "FTL bdev: %s creation deferred", req.name);
			spdk_jsonrpc_end_result(request, w);
		} else {
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Failed to create FTL bdev: %s",
							     spdk_strerror(-rc));
		}
		goto invalid;
	}
invalid:
	/* Reached on every path, including success */
	free_rpc_bdev_ftl_create(&req);
}
SPDK_RPC_REGISTER("bdev_ftl_create", rpc_bdev_ftl_create, SPDK_RPC_RUNTIME)
/* Decoded parameters of the bdev_ftl_delete RPC. */
struct rpc_delete_ftl {
	char *name;
};

static const struct spdk_json_object_decoder rpc_delete_ftl_decoders[] = {
	/* BUG FIX: the offsetof() previously referenced struct rpc_bdev_ftl_create;
	 * it only worked because 'name' is the first member of both structs. Use
	 * the struct actually being decoded into. */
	{"name", offsetof(struct rpc_delete_ftl, name), spdk_json_decode_string},
};
/* Unregister completion: translate the bdev layer's errno into either a bool
 * JSON-RPC result or an error response.
 */
static void
rpc_bdev_ftl_delete_cb(void *cb_arg, int bdeverrno)
{
	struct spdk_jsonrpc_request *request = cb_arg;

	if (bdeverrno != 0) {
		spdk_jsonrpc_send_error_response(request, bdeverrno, spdk_strerror(-bdeverrno));
		return;
	}

	spdk_jsonrpc_send_bool_response(request, true);
}
/* Handler for the bdev_ftl_delete RPC: decode the bdev name and start the
 * asynchronous unregister; the response is sent from rpc_bdev_ftl_delete_cb()
 * unless decoding fails. The decoded name is freed on every path.
 */
static void
rpc_bdev_ftl_delete(struct spdk_jsonrpc_request *request,
		    const struct spdk_json_val *params)
{
	struct rpc_delete_ftl attrs = {};

	if (spdk_json_decode_object(params, rpc_delete_ftl_decoders,
				    SPDK_COUNTOF(rpc_delete_ftl_decoders),
				    &attrs)) {
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 "Invalid parameters");
	} else {
		bdev_ftl_delete_bdev(attrs.name, rpc_bdev_ftl_delete_cb, request);
	}

	free(attrs.name);
}
SPDK_RPC_REGISTER("bdev_ftl_delete", rpc_bdev_ftl_delete, SPDK_RPC_RUNTIME)

View File

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Run a set of bdevperf workloads against an FTL bdev built on a zoned bdev
# backed by the nvme device given as $1. $2 may pass --use_append to exercise
# zone-append based writes.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

tests=('-q 1 -w randwrite -t 4 -o 69632' '-q 128 -w randwrite -t 4 -o 4096' '-q 128 -w verify -t 4 -o 4096')

device=$1
use_append=$2

for ((i = 0; i < ${#tests[@]}; i++)); do
	timing_enter "${tests[$i]}"

	# Start bdevperf paused (-z) so the bdev stack can be assembled over RPC first
	"$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) &
	bdevperf_pid=$!

	trap 'killprocess $bdevperf_pid; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $bdevperf_pid

	# nvme controller -> zoned bdev -> FTL bdev
	$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
	bdev_create_zone nvme0n1
	$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV" $use_append

	$rootdir/test/bdev/bdevperf/bdevperf.py perform_tests

	# Tear down in reverse order of creation
	$rpc_py bdev_ftl_delete -b ftl0
	bdev_delete_zone "$ZONE_DEV"
	$rpc_py bdev_nvme_detach_controller nvme0

	killprocess $bdevperf_pid
	trap - SIGINT SIGTERM EXIT
	timing_exit "${tests[$i]}"
done

View File

@ -1,82 +0,0 @@
# Common utility functions to be sourced by the libftl test scripts

# Print the number of logical blocks per chunk of the nvme device at PCIe addr $1
function get_chunk_size() {
	$SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
		| grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
}

# Print the number of groups of the nvme device at PCIe addr $1
function get_num_group() {
	$SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
		| grep 'Groups' | sed 's/[^0-9]//g'
}

# Print the number of parallel units of the nvme device at PCIe addr $1
function get_num_pu() {
	$SPDK_EXAMPLE_DIR/identify -r "trtype:PCIe traddr:$1" \
		| grep 'PUs' | sed 's/[^0-9]//g'
}

# Emit a minimal JSON config for spdk_tgt / bdevperf speeding up admin-queue polling
function gen_ftl_nvme_conf() {
	jq . <<- JSON
		{
		  "subsystems": [
		    {
		      "subsystem": "bdev",
		      "config": [
		        {
		          "params": {
		            "nvme_adminq_poll_period_us": 100
		          },
		          "method": "bdev_nvme_set_options"
		        }
		      ]
		    }
		  ]
		}
	JSON
}

get_ftl_nvme_dev() {
	# Find device with LBA matching the FTL_BLOCK_SIZE
	local nvmes nvme identify lba

	for nvme in $(nvme_in_userspace); do
		identify=$("$SPDK_EXAMPLE_DIR/identify" -r trtype:pcie -r "traddr:$nvme")
		# TODO: Skip zoned nvme devices - such setup for FTL is currently not
		# supported. See https://github.com/spdk/spdk/issues/1992 for details.
		# BUG FIX: this previously tested "$identity" — an undefined variable —
		# so zoned devices were never actually skipped.
		[[ $identify =~ "NVMe ZNS Zone Report" ]] && continue
		[[ $identify =~ "Current LBA Format:"\ +"LBA Format #"([0-9]+) ]]
		[[ $identify =~ "LBA Format #${BASH_REMATCH[1]}: Data Size:"\ +([0-9]+) ]]
		lba=${BASH_REMATCH[1]}
		((lba && lba % FTL_BLOCK_SIZE == 0)) && nvmes+=("$nvme")
	done
	((${#nvmes[@]} > 0)) || return 1
	printf '%s\n' "${nvmes[@]}"
}

# Create the zoned bdev $ZONE_DEV on top of base bdev $1
bdev_create_zone() {
	local base_bdev=$1

	# TODO: Consider use of ZNSed nvme controllers
	"$rpc_py" bdev_zone_block_create \
		-b "$ZONE_DEV" \
		-o "$OPTIMAL_OPEN_ZONES" \
		-z "$ZONE_CAPACITY" \
		-n "$base_bdev"
}

# Delete the zoned bdev named $1
bdev_delete_zone() {
	local zone_dev=$1

	# TODO: Consider use of ZNSed nvme controllers
	"$rpc_py" bdev_zone_block_delete "$zone_dev"
}

# Optimal number of zones refers to the number of zones that need to be written at the same
# time in order to maximize drive's write bandwidth.
# ZONE_CAPACITY * FTL_BLOCK_SIZE * OPTIMAL_OPEN_ZONES should be <= size of the drive.
FTL_BLOCK_SIZE=4096
ZONE_CAPACITY=4096
OPTIMAL_OPEN_ZONES=32
ZONE_DEV=zone0

rpc_py=$rootdir/scripts/rpc.py

View File

@ -1,2 +0,0 @@
ftl.conf
fio/*.fio

View File

@ -1,20 +0,0 @@
[global]
ioengine=spdk_bdev
spdk_json_conf=${FTL_JSON_CONF}
filename=${FTL_BDEV_NAME}
thread=1
direct=1
iodepth=128
rw=randwrite
verify=crc32c
do_verify=1
verify_dump=0
verify_state_save=0
verify_fatal=1
bs=4k
random_distribution=normal
serialize_overlap=1
io_size=256M
[test]
numjobs=1

View File

@ -1,25 +0,0 @@
[global]
ioengine=spdk_bdev
spdk_json_conf=${FTL_JSON_CONF}
filename=${FTL_BDEV_NAME}
thread=1
direct=1
iodepth=128
rw=randwrite
verify=crc32c
do_verify=1
verify_dump=0
verify_state_save=0
verify_backlog=5000
verify_fatal=1
bs=4k
random_distribution=normal
serialize_overlap=1
io_size=256M
[first_half]
offset=0%
size=50%
[second_half]
offset=50%

View File

@ -1,20 +0,0 @@
[global]
ioengine=spdk_bdev
spdk_json_conf=${FTL_JSON_CONF}
filename=${FTL_BDEV_NAME}
thread=1
direct=1
iodepth=1
rw=randwrite
size=256M
verify=crc32c
do_verify=1
verify_dump=0
verify_state_save=0
verify_backlog=16
verify_fatal=1
bs=68k
random_distribution=normal
[test]
numjobs=1

View File

@ -1,18 +0,0 @@
[global]
ioengine=spdk_bdev
spdk_json_conf=${FTL_JSON_CONF}
filename=${FTL_BDEV_NAME}
direct=1
thread=1
buffered=0
size=256M
randrepeat=0
time_based
norandommap
[test]
bs=4k
numjobs=1
rw=randwrite
iodepth=64
runtime=10

View File

@ -1,76 +0,0 @@
#!/usr/bin/env bash
# Dirty-shutdown recovery test: write data through an FTL bdev exposed via nbd,
# kill the target with SIGKILL (no clean shutdown), restart it from saved JSON
# config and verify all data (old and newly written) via md5 checksums.
# $1 is the PCIe address of the nvme device to use.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

device=$1

# Cleanup helper used both on error (via trap) and on normal exit
restore_kill() {
	rm -f "$config"
	rm -f "$SPDK_TEST_STORAGE/testfile.md5"
	rm -f "$SPDK_TEST_STORAGE/testfile2.md5"

	killprocess $svcpid || true
	rmmod nbd || true
}

trap "restore_kill; exit 1" SIGINT SIGTERM EXIT

chunk_size=$(get_chunk_size $device)
num_group=$(get_num_group $device)
num_pu=$(get_num_pu $device)
pu_count=$((num_group * num_pu))
config=$SPDK_TEST_STORAGE/ftl.json

# Write one band worth of data + one extra chunk
data_size=$((chunk_size * (pu_count + 1)))

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid

$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
bdev_create_zone nvme0n1
# -o allows open bands to survive the dirty shutdown
ftl_construct_args="bdev_ftl_create -b ftl0 -d $ZONE_DEV -o"

$rpc_py $ftl_construct_args

# Load the nbd driver
modprobe nbd
$rpc_py nbd_start_disk ftl0 /dev/nbd0
waitfornbd nbd0

$rpc_py save_config > "$config"

dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$data_size oflag=dsync

# Calculate checksum of the data written
dd if=/dev/nbd0 bs=4K count=$data_size | md5sum > "$SPDK_TEST_STORAGE/testfile.md5"
$rpc_py nbd_stop_disk /dev/nbd0

# Force kill bdev service (dirty shutdown) and start it again
kill -9 $svcpid
rm -f /dev/shm/spdk_tgt_trace.pid$svcpid

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
waitforlisten $svcpid

$rpc_py load_config < "$config"
waitfornbd nbd0

# Write extra data after restore
dd if=/dev/urandom of=/dev/nbd0 bs=4K count=$chunk_size seek=$data_size oflag=dsync
# Save md5 data
dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum > "$SPDK_TEST_STORAGE/testfile2.md5"

# Make sure all data will be read from disk
echo 3 > /proc/sys/vm/drop_caches

# Verify that the checksum matches and the data is consistent
dd if=/dev/nbd0 bs=4K count=$data_size | md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"
dd if=/dev/nbd0 bs=4K count=$chunk_size skip=$data_size | md5sum -c "$SPDK_TEST_STORAGE/testfile2.md5"

trap - SIGINT SIGTERM EXIT
restore_kill

View File

@ -1,54 +0,0 @@
#!/usr/bin/env bash
# Run the fio job files under config/fio/ against an FTL bdev through the SPDK
# fio bdev plugin. A short-lived spdk_tgt is used only to generate the bdev
# JSON config consumed by fio. $1 is the PCIe address of the nvme device.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

tests=(randw randw-verify randw-verify-j2 randw-verify-depth128)

rpc_py=$rootdir/scripts/rpc.py

fio_kill() {
	killprocess $svcpid
	rm -f $FTL_JSON_CONF
}

device=$1

if [[ $CONFIG_FIO_PLUGIN != y ]]; then
	echo "FIO not available"
	exit 1
fi

# Consumed by the fio job files via environment expansion
export FTL_BDEV_NAME=ftl0
export FTL_JSON_CONF=$testdir/config/ftl.json

trap "fio_kill; exit 1" SIGINT SIGTERM EXIT

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid

$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
bdev_create_zone nvme0n1
$rpc_py bdev_ftl_create -b ftl0 -d "$ZONE_DEV"

waitforbdev ftl0

# Capture the bdev subsystem config so fio can recreate the same stack
(
	echo '{"subsystems": ['
	$rpc_py save_subsystem_config -n bdev
	echo ']}'
) > $FTL_JSON_CONF

killprocess $svcpid
trap - SIGINT SIGTERM EXIT

for test in "${tests[@]}"; do
	timing_enter $test
	fio_bdev $testdir/config/fio/$test.fio
	timing_exit $test
done

rm -f $FTL_JSON_CONF

View File

@ -1,14 +0,0 @@
#!/usr/bin/env bash
# Top-level FTL test suite driver: pick the first suitable nvme device and run
# each FTL sub-test against it.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

# Use the first device with an LBA size compatible with FTL_BLOCK_SIZE
devices=($(get_ftl_nvme_dev)) device=${devices[0]}

run_test "ftl_bdevperf" $testdir/bdevperf.sh $device
run_test "ftl_bdevperf_append" $testdir/bdevperf.sh $device --use_append
run_test "ftl_restore" $testdir/restore.sh $device
run_test "ftl_json" $testdir/json.sh $device
run_test "ftl_fio" "$testdir/fio.sh" "$device"

View File

@ -1,36 +0,0 @@
#!/usr/bin/env bash
# Verify that an FTL bdev can be created from generated JSON subsystem config
# and later restored from the same config using its UUID. $1 is the PCIe
# address of the nvme device to use.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

device=$1

json_kill() {
	killprocess $svcpid
}

trap "json_kill; exit 1" SIGINT SIGTERM EXIT

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid

# Create new bdev from json configuration
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
bdev_create_zone nvme0n1
$rootdir/scripts/gen_ftl.sh -n ftl0 -d "$ZONE_DEV" | $rpc_py load_subsystem_config

waitforbdev ftl0
# Remember the UUID so the bdev can be restored rather than recreated
uuid=$($rpc_py bdev_get_bdevs | jq -r ".[] | select(.name==\"ftl0\").uuid")

$rpc_py bdev_ftl_delete -b ftl0

# Restore bdev from json configuration
$rootdir/scripts/gen_ftl.sh -n ftl0 -d "$ZONE_DEV" -u $uuid | $rpc_py load_subsystem_config
$rpc_py bdev_ftl_delete -b ftl0
$rpc_py bdev_nvme_detach_controller nvme0

trap - SIGINT SIGTERM EXIT
json_kill

View File

@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Clean-restart recovery test: put an ext4 filesystem on an FTL bdev (via nbd),
# write a file, restart the target from saved JSON config and verify both the
# original and a newly written file by md5. $1 is the PCIe address of the nvme
# device to use.

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/common.sh

mount_dir=$(mktemp -d)
device=$1
config=$SPDK_TEST_STORAGE/ftl.json

# Cleanup helper used both on error (via trap) and on normal exit
restore_kill() {
	if mount | grep $mount_dir; then
		umount $mount_dir
	fi
	rm -rf $mount_dir
	rm -f "$SPDK_TEST_STORAGE/testfile.md5"
	rm -f "$SPDK_TEST_STORAGE/testfile2.md5"
	rm -f "$config"

	killprocess $svcpid
	rmmod nbd || true
}

trap "restore_kill; exit 1" SIGINT SIGTERM EXIT

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
# Wait until spdk_tgt starts
waitforlisten $svcpid

$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
bdev_create_zone nvme0n1
ftl_construct_args="bdev_ftl_create -b ftl0 -d $ZONE_DEV"

$rpc_py $ftl_construct_args

# Load the nbd driver
modprobe nbd
$rpc_py nbd_start_disk ftl0 /dev/nbd0
waitfornbd nbd0

$rpc_py save_config > "$config"

# Prepare the disk by creating ext4 fs and putting a file on it
make_filesystem ext4 /dev/nbd0
mount /dev/nbd0 $mount_dir
dd if=/dev/urandom of=$mount_dir/testfile bs=4K count=4k
sync
mount -o remount /dev/nbd0 $mount_dir
md5sum $mount_dir/testfile > "$SPDK_TEST_STORAGE/testfile.md5"

# Kill bdev service and start it again
umount $mount_dir
killprocess $svcpid

"$SPDK_BIN_DIR/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
# Wait until spdk_tgt starts
waitforlisten $svcpid

$rpc_py load_config < "$config"
waitfornbd nbd0

mount /dev/nbd0 $mount_dir

# Write second file, to make sure writer thread has restored properly
dd if=/dev/urandom of=$mount_dir/testfile2 bs=4K count=4k
md5sum $mount_dir/testfile2 > "$SPDK_TEST_STORAGE/testfile2.md5"

# Make sure second file will be read from disk
echo 3 > /proc/sys/vm/drop_caches

# Check both files have proper data
md5sum -c "$SPDK_TEST_STORAGE/testfile.md5"
md5sum -c "$SPDK_TEST_STORAGE/testfile2.md5"

trap - SIGINT SIGTERM EXIT
restore_kill

View File

@ -12,7 +12,6 @@ DIRS-$(CONFIG_IDXD) += idxd
DIRS-$(CONFIG_REDUCE) += reduce
ifeq ($(OS),Linux)
DIRS-$(CONFIG_VHOST) += vhost
DIRS-y += ftl
endif
.PHONY: all clean $(DIRS-y)

View File

@ -1,16 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk

# Unit-test subdirectories; some are named after the source file they cover
# (e.g. ftl_band.c is a directory name here, not a source file).
DIRS-y = ftl_ppa ftl_band.c ftl_reloc.c ftl_wptr ftl_md ftl_io.c

.PHONY: all clean $(DIRS-y)

all: $(DIRS-y)
clean: $(DIRS-y)

include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk

View File

@ -1,144 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/ftl.h"
#include "ftl/ftl_core.h"
#include "thread/thread_internal.h"

/* Minimal description of the backing zoned bdev, used to size the fake
 * spdk_ftl_dev built by the helpers below. */
struct base_bdev_geometry {
	size_t write_unit_size;
	size_t zone_size;
	size_t optimal_open_zones;
	size_t blockcnt;
};

/* Each test binary defines its own geometry instance. */
extern struct base_bdev_geometry g_geo;

struct spdk_ftl_dev *test_init_ftl_dev(const struct base_bdev_geometry *geo);
struct ftl_band *test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size);
void test_free_ftl_dev(struct spdk_ftl_dev *dev);
void test_free_ftl_band(struct ftl_band *band);
uint64_t test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band);

DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
/* Stub: report the zone size from the test geometry instead of a real bdev. */
uint64_t
spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
{
	return g_geo.zone_size;
}
/* Stub: report the optimal open zone count from the test geometry. */
uint32_t
spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
{
	return g_geo.optimal_open_zones;
}
/* Build a minimal fake spdk_ftl_dev for unit tests: core thread, a
 * single-slot io-channel array, a band array sized from the geometry and an
 * LBA-map mempool. Counterpart of test_free_ftl_dev(). */
struct spdk_ftl_dev *
test_init_ftl_dev(const struct base_bdev_geometry *geo)
{
	struct spdk_ftl_dev *dev;

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->xfer_size = geo->write_unit_size;
	dev->core_thread = spdk_thread_create("unit_test_thread", NULL);
	spdk_set_thread(dev->core_thread);
	/* Channel struct plus room for one ftl_io_channel pointer.
	 * NOTE(review): this allocation is not checked with an assert like the
	 * others — left as-is. */
	dev->ioch = calloc(1, sizeof(*dev->ioch)
			   + sizeof(struct ftl_io_channel *));
	/* One band spans zone_size blocks on each of the optimal_open_zones punits */
	dev->num_bands = geo->blockcnt / (geo->zone_size * geo->optimal_open_zones);
	dev->bands = calloc(dev->num_bands, sizeof(*dev->bands));
	SPDK_CU_ASSERT_FATAL(dev->bands != NULL);

	dev->lba_pool = spdk_mempool_create("ftl_ut", 2, 0x18000,
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	SPDK_CU_ASSERT_FATAL(dev->lba_pool != NULL);

	LIST_INIT(&dev->free_bands);
	LIST_INIT(&dev->shut_bands);

	return dev;
}
/* Initialize band 'id' inside dev: mark it CLOSED, link it on shut_bands,
 * allocate its valid-block bitmap, per-punit zone array and reloc bitmap, and
 * populate one FULL zone per parallel unit. Counterpart of
 * test_free_ftl_band(). */
struct ftl_band *
test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id, size_t zone_size)
{
	struct ftl_band *band;
	struct ftl_zone *zone;

	SPDK_CU_ASSERT_FATAL(dev != NULL);
	SPDK_CU_ASSERT_FATAL(id < dev->num_bands);

	band = &dev->bands[id];
	band->dev = dev;
	band->id = id;

	band->state = FTL_BAND_STATE_CLOSED;
	LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
	CIRCLEQ_INIT(&band->zones);

	band->lba_map.vld = spdk_bit_array_create(ftl_get_num_blocks_in_band(dev));
	SPDK_CU_ASSERT_FATAL(band->lba_map.vld != NULL);

	band->zone_buf = calloc(ftl_get_num_punits(dev), sizeof(*band->zone_buf));
	SPDK_CU_ASSERT_FATAL(band->zone_buf != NULL);

	band->reloc_bitmap = spdk_bit_array_create(ftl_get_num_bands(dev));
	SPDK_CU_ASSERT_FATAL(band->reloc_bitmap != NULL);

	for (size_t i = 0; i < ftl_get_num_punits(dev); ++i) {
		zone = &band->zone_buf[i];
		zone->info.state = SPDK_BDEV_ZONE_STATE_FULL;
		/* Zones of consecutive bands are laid out punit-major on the device */
		zone->info.zone_id = zone_size * (id * ftl_get_num_punits(dev) + i);
		CIRCLEQ_INSERT_TAIL(&band->zones, zone, circleq);
		band->num_zones++;
	}

	pthread_spin_init(&band->lba_map.lock, PTHREAD_PROCESS_PRIVATE);
	return band;
}
/* Tear down a device built by test_init_ftl_dev(): drain and destroy its core
 * thread, then release the pool and arrays. Band-owned resources must already
 * have been freed via test_free_ftl_band(). */
void
test_free_ftl_dev(struct spdk_ftl_dev *dev)
{
	struct spdk_thread *thread;

	SPDK_CU_ASSERT_FATAL(dev != NULL);
	free(dev->ioch);

	thread = dev->core_thread;

	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	/* Poll until the thread has processed its exit */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_mempool_free(dev->lba_pool);
	free(dev->bands);
	free(dev);
}
/* Release the per-band resources allocated by test_init_ftl_band() and the
 * band's LBA-map DMA buffer (if one was allocated by the test). The band
 * struct itself lives in dev->bands and is freed with the device. */
void
test_free_ftl_band(struct ftl_band *band)
{
	SPDK_CU_ASSERT_FATAL(band != NULL);
	spdk_bit_array_free(&band->lba_map.vld);
	spdk_bit_array_free(&band->reloc_bitmap);
	free(band->zone_buf);
	spdk_dma_free(band->lba_map.dma_buf);
}
/* Convert an absolute device address into a block offset relative to the
 * start of 'band', asserting the address really belongs to that band. */
uint64_t
test_offset_from_addr(struct ftl_addr addr, struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	CU_ASSERT_EQUAL(ftl_addr_get_band(dev, addr), band->id);

	return addr.offset - band->id * ftl_get_num_blocks_in_band(dev);
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
# Single-source CUnit test built by the shared unittest makefile
TEST_FILE = ftl_band_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -1,365 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "common/lib/test_env.c"

/* Sources under test are compiled directly into the test binary */
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"
#include "../common/utils.c"

/* Arbitrary band index / LBA values used throughout the tests */
#define TEST_BAND_IDX		68
#define TEST_LBA		0x68676564

/* Geometry consumed by the helpers in ../common/utils.c */
struct base_bdev_geometry g_geo = {
	.write_unit_size	= 16,
	.optimal_open_zones	= 9,
	.zone_size		= 100,
	.blockcnt		= 1500 * 100 * 8,
};

static struct spdk_ftl_dev		*g_dev;
static struct ftl_band	*g_band;

/* Trace/validation hooks are only referenced by DEBUG builds of ftl_core */
#if defined(DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));

DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     enum ftl_trace_completion completion));
DEFINE_STUB_V(ftl_trace_defrag_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_wbuf_pop, (struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
				     struct ftl_addr addr, size_t addr_cnt));
#endif
/* bdev-layer stubs: the tests never touch real storage */
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_media_events, size_t,
	    (struct spdk_bdev_desc *bdev_desc, struct spdk_bdev_media_event *events,
	     size_t max_events), 0);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 8);
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		void *buf, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		uint64_t zone_id, enum spdk_bdev_zone_action action,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
/* ftl_io/reloc stubs: ftl_band.c interacts with these modules but the tests
 * only exercise address/LBA-map logic */
DEFINE_STUB_V(ftl_io_advance, (struct ftl_io *io, size_t num_blocks));
DEFINE_STUB_V(ftl_io_call_foreach_child,
	      (struct ftl_io *io, int (*callback)(struct ftl_io *)));
DEFINE_STUB(ftl_io_channel_get_ctx, struct ftl_io_channel *,
	    (struct spdk_io_channel *ioch), NULL);
DEFINE_STUB_V(ftl_io_complete, (struct ftl_io *io));
DEFINE_STUB(ftl_io_current_lba, uint64_t, (const struct ftl_io *io), 0);
DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
DEFINE_STUB(ftl_io_erase_init, struct ftl_io *,
	    (struct ftl_band *band, size_t num_blocks, ftl_io_fn cb), NULL);
DEFINE_STUB_V(ftl_io_fail, (struct ftl_io *io, int status));
DEFINE_STUB_V(ftl_io_free, (struct ftl_io *io));
DEFINE_STUB(ftl_io_get_lba, uint64_t,
	    (const struct ftl_io *io, size_t offset), 0);
DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));
DEFINE_STUB(ftl_io_init_internal, struct ftl_io *,
	    (const struct ftl_io_init_opts *opts), NULL);
DEFINE_STUB_V(ftl_io_reset, (struct ftl_io *io));
DEFINE_STUB(ftl_io_iovec_addr, void *, (struct ftl_io *io), NULL);
DEFINE_STUB(ftl_io_iovec_len_left, size_t, (struct ftl_io *io), 0);
DEFINE_STUB_V(ftl_io_shrink_iovec, (struct ftl_io *io, size_t num_blocks));
DEFINE_STUB(ftl_io_wbuf_init, struct ftl_io *,
	    (struct spdk_ftl_dev *dev, struct ftl_addr addr,
	     struct ftl_band *band, struct ftl_batch *batch, ftl_io_fn cb), NULL);
DEFINE_STUB(ftl_io_user_init, struct ftl_io *,
	    (struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
	     struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
	     void *cb_arg, int type), NULL);
DEFINE_STUB(ftl_iovec_num_blocks, size_t,
	    (struct iovec *iov, size_t iov_cnt), 0);
DEFINE_STUB(ftl_reloc, bool, (struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
			      size_t num_blocks, int prio, bool defrag));
DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);

#ifdef SPDK_CONFIG_PMDK
DEFINE_STUB_V(pmem_persist, (const void *addr, size_t len));
#endif
/* Per-test fixture: build a fresh fake device plus one band and allocate the
 * band's LBA map. Paired with cleanup_band(). */
static void
setup_band(void)
{
	int rc;

	g_dev = test_init_ftl_dev(&g_geo);
	g_band = test_init_ftl_band(g_dev, TEST_BAND_IDX, g_geo.zone_size);
	rc = ftl_band_alloc_lba_map(g_band);
	CU_ASSERT_EQUAL_FATAL(rc, 0);
}
/* Per-test teardown: band resources must be released before the device,
 * since the band struct lives inside dev->bands. */
static void
cleanup_band(void)
{
	test_free_ftl_band(g_band);
	test_free_ftl_dev(g_dev);
}
/* Build the address of the first block owned by the given parallel unit
 * (band-relative base; callers add the band offset themselves). */
static struct ftl_addr
addr_from_punit(uint64_t punit)
{
	return (struct ftl_addr) {
		.offset = punit * g_geo.zone_size,
	};
}
/* Verify that the first block of each punit maps to consecutive zone-sized
 * offsets within the band. */
static void
test_band_block_offset_from_addr_base(void)
{
	struct ftl_addr addr;
	uint64_t offset, i, flat_lun = 0;

	setup_band();
	for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
		addr = addr_from_punit(i);
		addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);

		offset = ftl_band_block_offset_from_addr(g_band, addr);
		CU_ASSERT_EQUAL(offset, flat_lun * ftl_get_num_blocks_in_zone(g_dev));
		flat_lun++;
	}
	cleanup_band();
}
/* Verify block-offset translation for every (punit, in-zone offset) pair,
 * cross-checked against the reference helper test_offset_from_addr(). */
static void
test_band_block_offset_from_addr_offset(void)
{
	struct ftl_addr addr;
	uint64_t offset, expect, i, j;

	setup_band();
	for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
		for (j = 0; j < g_geo.zone_size; ++j) {
			addr = addr_from_punit(i);
			addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;

			offset = ftl_band_block_offset_from_addr(g_band, addr);
			expect = test_offset_from_addr(addr, g_band);

			CU_ASSERT_EQUAL(offset, expect);
		}
	}
	cleanup_band();
}
/* Round-trip check: addr -> band block offset -> addr must reproduce the
 * original address for every block in the band. */
static void
test_band_addr_from_block_offset(void)
{
	struct ftl_addr addr, expect;
	uint64_t offset, i, j;

	setup_band();
	for (i = 0; i < ftl_get_num_punits(g_dev); ++i) {
		for (j = 0; j < g_geo.zone_size; ++j) {
			expect = addr_from_punit(i);
			expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + j;

			offset = ftl_band_block_offset_from_addr(g_band, expect);
			addr = ftl_band_addr_from_block_offset(g_band, offset);

			CU_ASSERT_EQUAL(addr.offset, expect.offset);
		}
	}
	cleanup_band();
}
/* Verify ftl_band_set_addr() updates the LBA map: valid-block count, the
 * offset->LBA mapping and the valid bitmap, for two distinct addresses. */
static void
test_band_set_addr(void)
{
	struct ftl_lba_map *lba_map;
	struct ftl_addr addr;
	uint64_t offset = 0;

	setup_band();
	lba_map = &g_band->lba_map;
	addr = addr_from_punit(0);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);

	CU_ASSERT_EQUAL(lba_map->num_vld, 0);

	offset = test_offset_from_addr(addr, g_band);

	ftl_band_set_addr(g_band, TEST_LBA, addr);

	CU_ASSERT_EQUAL(lba_map->num_vld, 1);
	CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));

	/* Second mapping one zone further must not disturb the first */
	addr.offset += g_geo.zone_size;
	offset = test_offset_from_addr(addr, g_band);
	ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
	CU_ASSERT_EQUAL(lba_map->num_vld, 2);
	CU_ASSERT_EQUAL(lba_map->map[offset], TEST_LBA + 1);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));
	addr.offset -= g_geo.zone_size;
	offset = test_offset_from_addr(addr, g_band);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset));

	cleanup_band();
}
/* Verify ftl_invalidate_addr() clears exactly the targeted mapping: first on
 * a single entry, then on one of two entries (the other must stay valid). */
static void
test_invalidate_addr(void)
{
	struct ftl_lba_map *lba_map;
	struct ftl_addr addr;
	uint64_t offset[2];

	setup_band();
	lba_map = &g_band->lba_map;
	addr = addr_from_punit(0);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	offset[0] = test_offset_from_addr(addr, g_band);

	ftl_band_set_addr(g_band, TEST_LBA, addr);
	CU_ASSERT_EQUAL(lba_map->num_vld, 1);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
	ftl_invalidate_addr(g_band->dev, addr);
	CU_ASSERT_EQUAL(lba_map->num_vld, 0);
	CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[0]));

	/* Re-populate two entries, then invalidate only the second one */
	offset[0] = test_offset_from_addr(addr, g_band);
	ftl_band_set_addr(g_band, TEST_LBA, addr);
	addr.offset += g_geo.zone_size;
	offset[1] = test_offset_from_addr(addr, g_band);
	ftl_band_set_addr(g_band, TEST_LBA + 1, addr);
	CU_ASSERT_EQUAL(lba_map->num_vld, 2);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[1]));
	ftl_invalidate_addr(g_band->dev, addr);
	CU_ASSERT_EQUAL(lba_map->num_vld, 1);
	CU_ASSERT_TRUE(spdk_bit_array_get(lba_map->vld, offset[0]));
	CU_ASSERT_FALSE(spdk_bit_array_get(lba_map->vld, offset[1]));

	cleanup_band();
}
/*
 * Verify ftl_band_next_xfer_addr() address arithmetic: advancing within a
 * zone, hopping between parallel units, wrapping from the last punit back to
 * the first, handling unaligned offsets, spanning the band multiple times,
 * and skipping offline zones.
 */
static void
test_next_xfer_addr(void)
{
	struct ftl_addr addr, result, expect;

	setup_band();

	/* Verify simple one block increment */
	addr = addr_from_punit(0);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	expect = addr;
	expect.offset += 1;
	result = ftl_band_next_xfer_addr(g_band, addr, 1);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Verify jumping between zones */
	expect = addr_from_punit(1);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	/* Advancing by a full xfer unit moves the cursor to the next punit. */
	result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Verify jumping works with unaligned offsets */
	expect = addr_from_punit(1);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + 3;
	result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 3);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Verify jumping from last zone to the first one */
	expect = addr_from_punit(0);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev) + g_dev->xfer_size;
	addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Verify jumping from last zone to the first one with unaligned offset */
	expect = addr_from_punit(0);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	expect.offset += g_dev->xfer_size + 2;
	addr = addr_from_punit(ftl_get_num_punits(g_dev) - 1);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	result = ftl_band_next_xfer_addr(g_band, addr, g_dev->xfer_size + 2);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Verify large offset spanning across the whole band multiple times */
	expect = addr_from_punit(0);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	expect.offset += g_dev->xfer_size * 5 + 4;
	addr = addr_from_punit(0);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	addr.offset += g_dev->xfer_size * 2 + 1;
	result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
					 ftl_get_num_punits(g_dev) + 3);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	/* Remove one zone and verify it's skipped properly */
	g_band->zone_buf[1].info.state = SPDK_BDEV_ZONE_STATE_OFFLINE;
	CIRCLEQ_REMOVE(&g_band->zones, &g_band->zone_buf[1], circleq);
	g_band->num_zones--;
	expect = addr_from_punit(2);
	expect.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	expect.offset += g_dev->xfer_size * 5 + 4;
	addr = addr_from_punit(0);
	addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
	addr.offset += g_dev->xfer_size * 2 + 1;
	result = ftl_band_next_xfer_addr(g_band, addr, 3 * g_dev->xfer_size *
					 (ftl_get_num_punits(g_dev) - 1) + g_dev->xfer_size + 3);
	CU_ASSERT_EQUAL(result.offset, expect.offset);

	cleanup_band();
}
/* Register and run the ftl_band unit-test suite; exit code is the failure count. */
int
main(int argc, char **argv)
{
	unsigned int failures;
	CU_pSuite suite;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_band_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_band_block_offset_from_addr_base);
	CU_ADD_TEST(suite, test_band_block_offset_from_addr_offset);
	CU_ADD_TEST(suite, test_band_addr_from_block_offset);
	CU_ADD_TEST(suite, test_band_set_addr);
	CU_ADD_TEST(suite, test_invalidate_addr);
	CU_ADD_TEST(suite, test_next_xfer_addr);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return failures;
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

# Build ftl_io_ut.c with the shared SPDK unit-test rules.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_io_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

# Build ftl_md_ut.c with the shared SPDK unit-test rules.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_md_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -1,133 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_band.c"
#include "../common/utils.c"
/* Stubs for FTL internals that the metadata pack/unpack tests never exercise. */
#if defined(DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
#endif
DEFINE_STUB_V(ftl_apply_limits, (struct spdk_ftl_dev *dev));
DEFINE_STUB(ftl_io_init_internal, struct ftl_io *,
	    (const struct ftl_io_init_opts *opts), NULL);
DEFINE_STUB_V(ftl_io_read, (struct ftl_io *io));
DEFINE_STUB_V(ftl_io_write, (struct ftl_io *io));
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
			      size_t num_blocks, int prio, bool defrag));

/* Synthetic base-bdev geometry shared by every test in this file.
 * NOTE(review): test_md_unpack_fail temporarily shrinks zone_size. */
struct base_bdev_geometry g_geo = {
	.write_unit_size = 16,
	.optimal_open_zones = 12,
	.zone_size = 100,
	.blockcnt = 1500 * 100 * 12,
};
/*
 * Create a device with the given geometry plus band 0 on it, allocate the
 * band's LBA map and put the band into the PREP state with a cleared map.
 * Aborts the test on LBA-map allocation failure.
 */
static void
setup_band(struct ftl_band **band, const struct base_bdev_geometry *geo)
{
	int rc;
	struct spdk_ftl_dev *dev;

	dev = test_init_ftl_dev(&g_geo);
	*band = test_init_ftl_band(dev, 0, geo->zone_size);
	rc = ftl_band_alloc_lba_map(*band);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	(*band)->state = FTL_BAND_STATE_PREP;
	ftl_band_clear_lba_map(*band);
}
/* Tear down a band created by setup_band() together with its device. */
static void
cleanup_band(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	/* The band must be freed before the device that owns it. */
	test_free_ftl_band(band);
	test_free_ftl_dev(dev);
}
/* Round-trip sanity check: pack head/tail metadata, then unpack it successfully. */
static void
test_md_unpack(void)
{
	struct ftl_band *band;
	struct ftl_lba_map *lba_map;

	setup_band(&band, &g_geo);
	lba_map = &band->lba_map;
	SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);

	ftl_pack_head_md(band);
	CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_SUCCESS);

	ftl_pack_tail_md(band);
	CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_SUCCESS);

	cleanup_band(band);
}
/*
 * Verify that ftl_unpack_{head,tail}_md() detect each class of corrupted
 * metadata: bad CRC, wrong version, wrong device UUID and a size mismatch.
 *
 * Fix: the original left g_geo.zone_size decremented after the invalid-size
 * check, polluting the shared geometry for any test registered afterwards.
 * It is now restored before teardown.
 */
static void
test_md_unpack_fail(void)
{
	struct ftl_band *band;
	struct ftl_lba_map *lba_map;
	struct ftl_md_hdr *hdr;

	setup_band(&band, &g_geo);
	lba_map = &band->lba_map;
	SPDK_CU_ASSERT_FATAL(lba_map->dma_buf);

	/* check crc */
	ftl_pack_tail_md(band);
	/* flip last bit of lba_map */
	*((char *)lba_map->dma_buf + ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE - 1) ^= 0x1;
	CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_CRC);

	/* check invalid version */
	hdr = lba_map->dma_buf;
	ftl_pack_tail_md(band);
	hdr->ver++;
	CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_VER);

	/* check wrong UUID */
	ftl_pack_head_md(band);
	hdr->uuid.u.raw[0] ^= 0x1;
	CU_ASSERT_EQUAL(ftl_unpack_head_md(band), FTL_MD_NO_MD);

	/* check invalid size */
	ftl_pack_tail_md(band);
	g_geo.zone_size--;
	CU_ASSERT_EQUAL(ftl_unpack_tail_md(band), FTL_MD_INVALID_SIZE);
	/* Restore the shared geometry so subsequent tests see the original zone size. */
	g_geo.zone_size++;

	cleanup_band(band);
}
/* Register and run the FTL metadata pack/unpack suite; return the failure count. */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_meta_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_md_unpack);
	CU_ADD_TEST(suite, test_md_unpack_fail);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

# Build ftl_ppa_ut.c with the shared SPDK unit-test rules.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_ppa_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -1,198 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_core.h"
/* Number of entries in the emulated L2P table. */
#define L2P_TABLE_SIZE 1024

/* Device under test, (re)created by the per-suite setup functions. */
static struct spdk_ftl_dev *g_dev;

DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
/*
 * Mock: report a zone size that depends on the suite's address width so the
 * 64-bit suite forces unpacked addresses (zone size > 32-bit range) while the
 * 32-bit suite stays packable.
 */
uint64_t
spdk_bdev_get_zone_size(const struct spdk_bdev *bdev)
{
	if (g_dev->addr_len > 32) {
		return 1ULL << 32;
	}

	return 1024;
}
/* Mock: fixed number of optimal open zones for both suites. */
uint32_t
spdk_bdev_get_optimal_open_zones(const struct spdk_bdev *bdev)
{
	return 100;
}
/*
 * Allocate a bare spdk_ftl_dev with an L2P table of L2P_TABLE_SIZE entries,
 * each 'size' bytes wide.
 *
 * Fix: the original dereferenced the calloc() results without checking them,
 * which is a NULL dereference on allocation failure; abort the run instead.
 */
static struct spdk_ftl_dev *
test_alloc_dev(size_t size)
{
	struct spdk_ftl_dev *dev;

	dev = calloc(1, sizeof(*dev));
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->num_lbas = L2P_TABLE_SIZE;
	dev->l2p = calloc(L2P_TABLE_SIZE, size);
	SPDK_CU_ASSERT_FATAL(dev->l2p != NULL);

	return dev;
}
/* Suite init: 32-bit (packed) L2P entries; addr_len <= 32 selects packing. */
static int
setup_l2p_32bit(void)
{
	g_dev = test_alloc_dev(sizeof(uint32_t));
	g_dev->addr_len = 24;
	return 0;
}
/* Suite init: 64-bit (unpacked) L2P entries; addr_len > 32 disables packing. */
static int
setup_l2p_64bit(void)
{
	g_dev = test_alloc_dev(sizeof(uint64_t));
	g_dev->addr_len = 63;
	return 0;
}
/* Zero the whole L2P table between test cases, sized per the active entry width. */
static void
clean_l2p(void)
{
	size_t elem_size;

	elem_size = ftl_addr_packed(g_dev) ? sizeof(uint32_t) : sizeof(uint64_t);
	memset(g_dev->l2p, 0, g_dev->num_lbas * elem_size);
}
/* Suite teardown: release the L2P table and the device, reset the global. */
static int
cleanup(void)
{
	free(g_dev->l2p);
	free(g_dev);
	g_dev = NULL;
	return 0;
}
/*
 * Verify 32-bit address packing round-trips: a plain media address, the
 * invalid-address sentinel, and a cache-entry address must all survive
 * ftl_addr_to_packed() / ftl_addr_from_packed() unchanged.
 */
static void
test_addr_pack32(void)
{
	struct ftl_addr orig = {}, addr;

	/* Check valid address transformation */
	orig.offset = 4;
	addr = ftl_addr_to_packed(g_dev, orig);
	CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
	CU_ASSERT_FALSE(addr.pack.cached);
	addr = ftl_addr_from_packed(g_dev, addr);
	CU_ASSERT_FALSE(ftl_addr_invalid(addr));
	CU_ASSERT_EQUAL(addr.offset, orig.offset);

	/* Check invalid address transformation */
	orig = ftl_to_addr(FTL_ADDR_INVALID);
	addr = ftl_addr_to_packed(g_dev, orig);
	CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
	addr = ftl_addr_from_packed(g_dev, addr);
	CU_ASSERT_TRUE(ftl_addr_invalid(addr));

	/* Check cached entry offset transformation */
	orig.cached = 1;
	orig.cache_offset = 1024;
	addr = ftl_addr_to_packed(g_dev, orig);
	CU_ASSERT_TRUE(addr.offset <= UINT32_MAX);
	CU_ASSERT_TRUE(addr.pack.cached);
	addr = ftl_addr_from_packed(g_dev, addr);
	CU_ASSERT_FALSE(ftl_addr_invalid(addr));
	CU_ASSERT_TRUE(ftl_addr_cached(addr));
	CU_ASSERT_EQUAL(addr.offset, orig.offset);

	clean_l2p();
}
/*
 * Mark every even LBA invalid and verify ftl_addr_invalid() reports exactly
 * those entries, while odd (zero-initialized) entries stay valid.
 */
static void
test_addr_invalid(void)
{
	struct ftl_addr addr;
	size_t lba;

	/* Invalidate every other LBA. */
	for (lba = 0; lba < L2P_TABLE_SIZE; lba += 2) {
		ftl_l2p_set(g_dev, lba, ftl_to_addr(FTL_ADDR_INVALID));
	}

	/* Even LBAs must read back invalid, odd ones valid. */
	for (lba = 0; lba < L2P_TABLE_SIZE; lba++) {
		addr = ftl_l2p_get(g_dev, lba);
		if (lba % 2) {
			CU_ASSERT_FALSE(ftl_addr_invalid(addr));
		} else {
			CU_ASSERT_TRUE(ftl_addr_invalid(addr));
		}
	}

	clean_l2p();
}
/*
 * Mark every even LBA as a cache entry pointing at its own index and verify
 * ftl_addr_cached() plus the stored cache_offset for exactly those entries.
 */
static void
test_addr_cached(void)
{
	struct ftl_addr addr;
	size_t lba;

	/* Cache every other LBA. */
	for (lba = 0; lba < L2P_TABLE_SIZE; lba += 2) {
		addr.cached = 1;
		addr.cache_offset = lba;
		ftl_l2p_set(g_dev, lba, addr);
	}

	/* Even LBAs must read back cached with the right offset, odd ones must not. */
	for (lba = 0; lba < L2P_TABLE_SIZE; lba++) {
		addr = ftl_l2p_get(g_dev, lba);
		if (lba % 2) {
			CU_ASSERT_FALSE(ftl_addr_cached(addr));
		} else {
			CU_ASSERT_TRUE(ftl_addr_cached(addr));
			CU_ASSERT_EQUAL(addr.cache_offset, lba);
		}
	}

	clean_l2p();
}
/*
 * Run the packed (32-bit) and unpacked (64-bit) address suites.  The pack32
 * round-trip test only makes sense for the 32-bit suite; the invalid/cached
 * tests run under both entry widths.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite32 = NULL, suite64 = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite32 = CU_add_suite("ftl_addr32_suite", setup_l2p_32bit, cleanup);
	suite64 = CU_add_suite("ftl_addr64_suite", setup_l2p_64bit, cleanup);

	CU_ADD_TEST(suite32, test_addr_pack32);
	CU_ADD_TEST(suite32, test_addr_invalid);
	CU_ADD_TEST(suite32, test_addr_cached);
	CU_ADD_TEST(suite64, test_addr_invalid);
	CU_ADD_TEST(suite64, test_addr_cached);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

# Build ftl_reloc_ut.c with the shared SPDK unit-test rules.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_reloc_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -1,480 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_reloc.c"
#include "../common/utils.c"
/* Relocation limits applied to the device under test in setup_reloc(). */
#define MAX_ACTIVE_RELOCS 5
#define MAX_RELOC_QDEPTH 31

/* Synthetic base-bdev geometry shared by all reloc tests. */
struct base_bdev_geometry g_geo = {
	.write_unit_size = 16,
	.optimal_open_zones = 12,
	.zone_size = 100,
	.blockcnt = 1500 * 100 * 12,
};

/* Stubs for FTL internals the reloc tests do not exercise. */
DEFINE_STUB(ftl_dev_tail_md_disk_size, size_t, (const struct spdk_ftl_dev *dev), 1);
DEFINE_STUB(ftl_addr_is_written, bool, (struct ftl_band *band, struct ftl_addr addr), true);
DEFINE_STUB_V(ftl_band_set_state, (struct ftl_band *band, enum ftl_band_state state));
DEFINE_STUB_V(ftl_free_io, (struct ftl_io *io));
#if defined(DEBUG)
DEFINE_STUB_V(ftl_trace_lba_io_init, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
#endif
/* Test double: take a map buffer from the device pool and bump the refcount. */
int
ftl_band_alloc_lba_map(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	ftl_band_acquire_lba_map(band);
	band->lba_map.map = spdk_mempool_get(dev->lba_pool);

	return 0;
}
/* Test double: drop the map refcount and return the buffer to the pool. */
void
ftl_band_release_lba_map(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	band->lba_map.ref_cnt--;
	spdk_mempool_put(dev->lba_pool, band->lba_map.map);
	band->lba_map.map = NULL;
}
/* Test double: reference counting only; no I/O is performed. */
void
ftl_band_acquire_lba_map(struct ftl_band *band)
{
	band->lba_map.ref_cnt++;
}
/* Blocks needed to persist one band's LBA map (one uint64_t entry per block). */
size_t
ftl_lba_map_num_blocks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_get_num_blocks_in_band(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}
/*
 * Test double: complete the "read" synchronously by invoking the callback
 * immediately with status 0.  NOTE(review): ctx is passed for both the io and
 * ctx arguments — presumably the callbacks under test only use ctx; confirm
 * against ftl_io_fn's signature if reused.
 */
int
ftl_band_read_lba_map(struct ftl_band *band, size_t offset,
		      size_t num_blocks, ftl_io_fn fn, void *ctx)
{
	fn(ctx, ctx, 0);
	return 0;
}
/* Test double: delegate to the shared test helper. */
uint64_t
ftl_band_block_offset_from_addr(struct ftl_band *band, struct ftl_addr addr)
{
	return test_offset_from_addr(addr, band);
}
/* Test double: linear mapping — band-relative offset plus the band's base. */
struct ftl_addr
ftl_band_addr_from_block_offset(struct ftl_band *band, uint64_t block_off)
{
	struct ftl_addr addr = {};

	addr.offset = block_off + band->id * ftl_get_num_blocks_in_band(band->dev);
	return addr;
}
/* Test double: complete the read synchronously and free the io. */
void
ftl_io_read(struct ftl_io *io)
{
	io->cb_fn(io, io->cb_ctx, 0);
	free(io);
}
/* Test double: complete the write synchronously, freeing the LBA vector
 * allocated by ftl_io_init_internal() along with the io itself. */
void
ftl_io_write(struct ftl_io *io)
{
	io->cb_fn(io, io->cb_ctx, 0);
	free(io->lba.vector);
	free(io);
}
/*
 * Test double: build an ftl_io from the init opts, reusing opts->io when the
 * caller supplied one.  Allocates a per-block LBA vector for FTL_IO_VECTOR_LBA
 * requests (freed by the ftl_io_write() stub above).
 * NOTE(review): cb_ctx is deliberately set to the io itself rather than an
 * opts-provided context — the synchronous read/write stubs pass it straight
 * back to cb_fn; confirm before reusing elsewhere.
 */
struct ftl_io *
ftl_io_init_internal(const struct ftl_io_init_opts *opts)
{
	struct ftl_io *io = opts->io;

	if (!io) {
		io = calloc(1, opts->size);
	}

	SPDK_CU_ASSERT_FATAL(io != NULL);

	io->dev = opts->dev;
	io->band = opts->band;
	io->flags = opts->flags;
	io->cb_fn = opts->cb_fn;
	io->cb_ctx = io;
	io->num_blocks = opts->num_blocks;
	memcpy(&io->iov, &opts->iovs, sizeof(io->iov));
	io->iov_cnt = opts->iovcnt;

	if (opts->flags & FTL_IO_VECTOR_LBA) {
		io->lba.vector = calloc(io->num_blocks, sizeof(uint64_t));
		SPDK_CU_ASSERT_FATAL(io->lba.vector != NULL);
	}

	return io;
}
/*
 * Test double: hand out a buffer big enough for the largest io type used in
 * these tests (struct ftl_md_io).
 *
 * Fix: the original returned malloc()ed, uninitialized memory even though
 * ftl_io_reinit() only sets cb_fn/cb_ctx/type — any other field read by the
 * code under test would be indeterminate.  Zero-initialize instead.
 */
struct ftl_io *
ftl_io_alloc(struct spdk_io_channel *ch)
{
	size_t io_size = sizeof(struct ftl_md_io);

	return calloc(1, io_size);
}
/*
 * Test double: rebind the io's callback, context and type for reuse.
 * NOTE(review): the 'flags' parameter is accepted but never stored —
 * presumably the reloc paths under test do not read io->flags after reinit;
 * confirm before relying on it.
 */
void
ftl_io_reinit(struct ftl_io *io, ftl_io_fn fn, void *ctx, int flags, int type)
{
	io->cb_fn = fn;
	io->cb_ctx = ctx;
	io->type = type;
}
/* Drive one full block move: read, LBA-map read, then write — each is one
 * ftl_process_reloc() step because the io stubs complete synchronously. */
static void
single_reloc_move(struct ftl_band_reloc *breloc)
{
	/* Process read */
	ftl_process_reloc(breloc);
	/* Process lba map read */
	ftl_process_reloc(breloc);
	/* Process write */
	ftl_process_reloc(breloc);
}
/* Promote a band reloc from the pending queue to the head of the active queue. */
static void
add_to_active_queue(struct ftl_reloc *reloc, struct ftl_band_reloc *breloc)
{
	TAILQ_REMOVE(&reloc->pending_queue, breloc, entry);
	breloc->state = FTL_BAND_RELOC_STATE_ACTIVE;
	TAILQ_INSERT_HEAD(&reloc->active_queue, breloc, entry);
}
/*
 * Create a device with every band initialized, attach a relocator configured
 * with the MAX_ACTIVE_RELOCS/MAX_RELOC_QDEPTH limits and resume it.
 */
static void
setup_reloc(struct spdk_ftl_dev **_dev, struct ftl_reloc **_reloc,
	    const struct base_bdev_geometry *geo)
{
	size_t i;
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;

	dev = test_init_ftl_dev(geo);
	dev->conf.max_active_relocs = MAX_ACTIVE_RELOCS;
	dev->conf.max_reloc_qdepth = MAX_RELOC_QDEPTH;

	SPDK_CU_ASSERT_FATAL(ftl_get_num_bands(dev) > 0);

	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		test_init_ftl_band(dev, i, geo->zone_size);
	}

	reloc = ftl_reloc_init(dev);
	dev->reloc = reloc;
	CU_ASSERT_PTR_NOT_NULL_FATAL(reloc);
	ftl_reloc_resume(reloc);

	*_dev = dev;
	*_reloc = reloc;
}
/* Tear down the relocator and device; every band reloc must be inactive first. */
static void
cleanup_reloc(struct spdk_ftl_dev *dev, struct ftl_reloc *reloc)
{
	size_t i;

	for (i = 0; i < ftl_get_num_bands(reloc->dev); ++i) {
		SPDK_CU_ASSERT_FATAL(reloc->brelocs[i].state == FTL_BAND_RELOC_STATE_INACTIVE);
	}

	ftl_reloc_free(reloc);

	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		test_free_ftl_band(&dev->bands[i]);
	}
	test_free_ftl_dev(dev);
}
/* Mark [offset, offset + num_blocks) valid in the band's LBA map, keeping
 * the num_vld counter consistent with the bit array. */
static void
set_band_valid_map(struct ftl_band *band, size_t offset, size_t num_blocks)
{
	struct ftl_lba_map *lba_map = &band->lba_map;
	size_t i;

	SPDK_CU_ASSERT_FATAL(lba_map != NULL);
	for (i = offset; i < offset + num_blocks; ++i) {
		spdk_bit_array_set(lba_map->vld, i);
		lba_map->num_vld++;
	}
}
/*
 * Iterate a fully-valid band through ftl_reloc_next_blocks() and verify it
 * hands out xfer_size-sized chunks followed by one per-zone remainder chunk,
 * without consuming breloc->num_blocks (every block stays valid).
 *
 * Fixes: removed a redundant post-setup write to the shared g_geo.zone_size
 * (it was already 100 and the device geometry was captured by setup_reloc);
 * renamed the misspelled local 'reminder' to 'remainder'.
 */
static void
test_reloc_iter_full(void)
{
	size_t num_blocks, num_iters, remainder, i;
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;
	struct ftl_addr addr;

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;

	set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));

	ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);

	CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));

	num_iters = ftl_get_num_punits(dev) *
		    (ftl_get_num_blocks_in_zone(dev) / reloc->xfer_size);

	for (i = 0; i < num_iters; i++) {
		num_blocks = ftl_reloc_next_blocks(breloc, &addr);
		CU_ASSERT_EQUAL(num_blocks, reloc->xfer_size);
	}

	num_iters = ftl_get_num_punits(dev);

	/* ftl_reloc_next_blocks() is searching for up to xfer_size contiguous
	 * valid blocks in a zone, so when the zone's block count is not
	 * divisible by xfer_size each zone contributes one final, smaller
	 * remainder chunk. */
	remainder = ftl_get_num_blocks_in_zone(dev) % reloc->xfer_size;

	for (i = 0; i < num_iters; i++) {
		num_blocks = ftl_reloc_next_blocks(breloc, &addr);
		CU_ASSERT_EQUAL(remainder, num_blocks);
	}

	/* num_blocks should remain intact since all the blocks are valid */
	CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));
	breloc->state = FTL_BAND_RELOC_STATE_INACTIVE;

	cleanup_reloc(dev, reloc);
}
/* A band with no valid blocks must produce a zero-length relocation. */
static void
test_reloc_empty_band(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;

	/* No blocks were marked valid, so nothing is queued for relocation. */
	ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);

	CU_ASSERT_EQUAL(breloc->num_blocks, 0);

	cleanup_reloc(dev, reloc);
}
/*
 * Relocate a fully-valid band and check num_blocks drains by
 * MAX_RELOC_QDEPTH * xfer_size per iteration until the band completes.
 */
static void
test_reloc_full_band(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;
	size_t num_moves, num_iters, num_block, i;

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;
	/* Blocks moved per iteration: full queue depth, xfer_size blocks each. */
	num_moves = MAX_RELOC_QDEPTH * reloc->xfer_size;
	num_iters = ftl_get_num_blocks_in_band(dev) / num_moves;

	set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));

	ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);

	CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));

	ftl_reloc_prep(breloc);
	add_to_active_queue(reloc, breloc);

	for (i = 1; i <= num_iters; ++i) {
		single_reloc_move(breloc);
		num_block = ftl_get_num_blocks_in_band(dev) - (i * num_moves);
		CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
	}

	/* Process remainder blocks */
	single_reloc_move(breloc);
	/* Drain move queue */
	ftl_reloc_process_moves(breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, 0);
	CU_ASSERT_TRUE(ftl_reloc_done(breloc));
	ftl_reloc_release(breloc);

	cleanup_reloc(dev, reloc);
}
/* Relocate a band where only every other block is valid (scattered map). */
static void
test_reloc_scatter_band(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;
	size_t num_iters, i;

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;
	num_iters = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), MAX_RELOC_QDEPTH * 2);

	/* Mark only the odd-indexed blocks valid. */
	for (i = 0; i < ftl_get_num_blocks_in_band(dev); ++i) {
		if (i % 2) {
			set_band_valid_map(band, i, 1);
		}
	}

	ftl_reloc_add(reloc, band, 0, ftl_get_num_blocks_in_band(dev), 0, true);
	ftl_reloc_prep(breloc);
	add_to_active_queue(reloc, breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_band(dev));

	for (i = 0; i < num_iters ; ++i) {
		single_reloc_move(breloc);
	}

	ftl_process_reloc(breloc);
	CU_ASSERT_EQUAL(breloc->num_blocks, 0);
	CU_ASSERT_TRUE(ftl_reloc_done(breloc));

	cleanup_reloc(dev, reloc);
}
/* Relocate a single zone (the fourth one) of a high-priority band. */
static void
test_reloc_zone(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;
	size_t num_io, num_iters, num_block, i;

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;
	/* High priority band have allocated lba map */
	band->high_prio = 1;
	ftl_band_alloc_lba_map(band);
	num_io = MAX_RELOC_QDEPTH * reloc->xfer_size;
	num_iters = ftl_get_num_blocks_in_zone(dev) / num_io;

	set_band_valid_map(band, 0, ftl_get_num_blocks_in_band(dev));

	/* Queue only the zone starting at 3 * zone_size, with priority 1. */
	ftl_reloc_add(reloc, band, ftl_get_num_blocks_in_zone(dev) * 3,
		      ftl_get_num_blocks_in_zone(dev), 1, false);
	add_to_active_queue(reloc, breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, ftl_get_num_blocks_in_zone(dev));

	for (i = 1; i <= num_iters ; ++i) {
		single_reloc_move(breloc);
		num_block = ftl_get_num_blocks_in_zone(dev) - (i * num_io);
		CU_ASSERT_EQUAL(breloc->num_blocks, num_block);
	}

	/* In case num_blocks_in_zone % num_io != 0 one extra iteration is needed */
	single_reloc_move(breloc);
	/* Drain move queue */
	ftl_reloc_process_moves(breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, 0);
	CU_ASSERT_TRUE(ftl_reloc_done(breloc));
	ftl_reloc_release(breloc);

	cleanup_reloc(dev, reloc);
}
/* Relocate exactly one valid block at a fixed offset within the band. */
static void
test_reloc_single_block(void)
{
	struct spdk_ftl_dev *dev;
	struct ftl_reloc *reloc;
	struct ftl_band_reloc *breloc;
	struct ftl_band *band;
	/* Arbitrary in-band offset of the single block under test. */
#define TEST_RELOC_OFFSET 6

	setup_reloc(&dev, &reloc, &g_geo);

	breloc = &reloc->brelocs[0];
	band = breloc->band;

	set_band_valid_map(band, TEST_RELOC_OFFSET, 1);

	ftl_reloc_add(reloc, band, TEST_RELOC_OFFSET, 1, 0, false);
	SPDK_CU_ASSERT_FATAL(breloc == TAILQ_FIRST(&reloc->pending_queue));
	ftl_reloc_prep(breloc);
	add_to_active_queue(reloc, breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, 1);

	single_reloc_move(breloc);
	/* Drain move queue */
	ftl_reloc_process_moves(breloc);

	CU_ASSERT_EQUAL(breloc->num_blocks, 0);
	CU_ASSERT_TRUE(ftl_reloc_done(breloc));
	ftl_reloc_release(breloc);

	cleanup_reloc(dev, reloc);
}
/*
 * Register and run the relocation unit tests; return the failure count.
 *
 * Fix: the suite was registered under the copy-pasted name "ftl_band_suite",
 * which misattributed these results in test reports; it now carries the
 * reloc-specific name.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_reloc_suite", NULL, NULL);

	CU_ADD_TEST(suite, test_reloc_iter_full);
	CU_ADD_TEST(suite, test_reloc_empty_band);
	CU_ADD_TEST(suite, test_reloc_full_band);
	CU_ADD_TEST(suite, test_reloc_scatter_band);
	CU_ADD_TEST(suite, test_reloc_zone);
	CU_ADD_TEST(suite, test_reloc_single_block);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}

View File

@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

# Build ftl_wptr_ut.c with the shared SPDK unit-test rules.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_wptr_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -1,300 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) Intel Corporation.
* All rights reserved.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"
#include "ftl/ftl_init.c"
#include "../common/utils.c"
/* Synthetic base-bdev geometry shared by the write-pointer tests. */
struct base_bdev_geometry g_geo = {
	.write_unit_size = 16,
	.optimal_open_zones = 12,
	.zone_size = 128,
	.blockcnt = 20 * 128 * 12,
};
#if defined(DEBUG)
DEFINE_STUB(ftl_band_validate_md, bool, (struct ftl_band *band), true);
DEFINE_STUB_V(ftl_trace_limits, (struct spdk_ftl_dev *dev, int limit, size_t num_free));
DEFINE_STUB_V(ftl_trace_completion, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
enum ftl_trace_completion completion));
DEFINE_STUB_V(ftl_trace_defrag_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_wbuf_fill, (struct spdk_ftl_dev *dev, const struct ftl_io *io));
DEFINE_STUB_V(ftl_trace_wbuf_pop, (struct spdk_ftl_dev *dev, const struct ftl_wbuf_entry *entry));
DEFINE_STUB_V(ftl_trace_write_band, (struct spdk_ftl_dev *dev, const struct ftl_band *band));
DEFINE_STUB_V(ftl_trace_submission, (struct spdk_ftl_dev *dev, const struct ftl_io *io,
struct ftl_addr addr, size_t addr_cnt));
#endif
#if defined(FTL_META_DEBUG)
DEFINE_STUB_V(ftl_dev_dump_bands, (struct spdk_ftl_dev *dev));
#endif
#if defined(FTL_DUMP_STATS)
DEFINE_STUB_V(ftl_dev_dump_stats, (const struct spdk_ftl_dev *dev));
#endif
DEFINE_STUB_V(ftl_io_call_foreach_child,
(struct ftl_io *io, int (*callback)(struct ftl_io *)));
DEFINE_STUB(ftl_io_current_lba, uint64_t, (const struct ftl_io *io), 0);
DEFINE_STUB_V(ftl_io_dec_req, (struct ftl_io *io));
DEFINE_STUB_V(ftl_io_free, (struct ftl_io *io));
DEFINE_STUB(ftl_io_get_lba, uint64_t,
(const struct ftl_io *io, size_t offset), 0);
DEFINE_STUB_V(ftl_io_inc_req, (struct ftl_io *io));
DEFINE_STUB(ftl_io_iovec_addr, void *, (struct ftl_io *io), NULL);
DEFINE_STUB(ftl_io_iovec_len_left, size_t, (struct ftl_io *io), 0);
DEFINE_STUB_V(ftl_io_fail, (struct ftl_io *io, int status));
DEFINE_STUB(ftl_io_init_internal, struct ftl_io *,
(const struct ftl_io_init_opts *opts), NULL);
DEFINE_STUB_V(ftl_io_reset, (struct ftl_io *io));
DEFINE_STUB(ftl_iovec_num_blocks, size_t,
(struct iovec *iov, size_t iov_cnt), 0);
DEFINE_STUB_V(ftl_io_process_error, (struct ftl_io *io, const struct spdk_nvme_cpl *status));
DEFINE_STUB_V(ftl_io_shrink_iovec, (struct ftl_io *io, size_t num_blocks));
DEFINE_STUB(ftl_io_wbuf_init, struct ftl_io *,
(struct spdk_ftl_dev *dev, struct ftl_addr addr,
struct ftl_band *band, struct ftl_batch *batch, ftl_io_fn cb), NULL);
DEFINE_STUB(ftl_io_user_init, struct ftl_io *,
(struct spdk_io_channel *ioch, uint64_t lba, size_t num_blocks,
struct iovec *iov, size_t iov_cnt, spdk_ftl_fn cb_fn,
void *cb_arg, int type), NULL);
DEFINE_STUB(ftl_reloc, bool, (struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_add, (struct ftl_reloc *reloc, struct ftl_band *band, size_t offset,
size_t num_blocks, int prio, bool defrag));
DEFINE_STUB_V(ftl_reloc_free, (struct ftl_reloc *reloc));
DEFINE_STUB_V(ftl_reloc_halt, (struct ftl_reloc *reloc));
DEFINE_STUB(ftl_reloc_init, struct ftl_reloc *, (struct spdk_ftl_dev *dev), NULL);
DEFINE_STUB(ftl_reloc_is_defrag_active, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB(ftl_reloc_is_halted, bool, (const struct ftl_reloc *reloc), false);
DEFINE_STUB_V(ftl_reloc_resume, (struct ftl_reloc *reloc));
DEFINE_STUB(ftl_restore_device, int,
(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB(ftl_restore_md, int,
(struct spdk_ftl_dev *dev, ftl_restore_fn cb, void *cb_arg), 0);
DEFINE_STUB_V(ftl_restore_nv_cache,
(struct ftl_restore *restore, ftl_restore_fn cb, void *cb_arg));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
(const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_media_events, size_t,
(struct spdk_bdev_desc *bdev_desc, struct spdk_bdev_media_event *events,
size_t max_events), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_write_unit_size, uint32_t,
(const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_zone_info, int,
(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
uint64_t zone_id, size_t num_zones, struct spdk_bdev_zone_info *info,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_io_get_append_location, uint64_t, (struct spdk_bdev_io *bdev_io), 0);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
enum spdk_bdev_io_type io_type), true);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_open_ext, int,
(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
void *event_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB(spdk_bdev_read_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
void *buf, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
void *buf, uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_blocks_with_md, int, (struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch, void *buf, void *md, uint64_t offset_blocks,
uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_zone_appendv, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt, uint64_t zone_id, uint64_t num_blocks,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_zone_management, int, (struct spdk_bdev_desc *desc,
struct spdk_io_channel *ch,
uint64_t zone_id, enum spdk_bdev_zone_action action,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_mempool_create_ctor, struct spdk_mempool *,
(const char *name, size_t count, size_t ele_size, size_t cache_size,
int socket_id, spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg), NULL);
DEFINE_STUB(spdk_mempool_obj_iter, uint32_t,
(struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb, void *obj_cb_arg), 0);
#ifdef SPDK_CONFIG_PMDK
DEFINE_STUB_V(pmem_persist, (const void *addr, size_t len));
DEFINE_STUB(pmem_map_file, void *,
(const char *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp), NULL);
DEFINE_STUB(pmem_unmap, int, (void *addr, size_t len), 0);
DEFINE_STUB(pmem_memset_persist, void *, (void *pmemdest, int c, size_t len), NULL);
#endif
/* Mock: reuse the bdev descriptor pointer as the io_device key. */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *bdev_desc)
{
	return spdk_get_io_channel(bdev_desc);
}
/*
 * Test double: allocate a minimal erase io bound to the band.
 * NOTE(review): the num_blocks parameter is ignored and the io is hard-coded
 * to 1 block — presumably sufficient for test_wptr's use of ftl_md_write_cb;
 * confirm before reusing this stub with multi-block erases.
 */
struct ftl_io *
ftl_io_erase_init(struct ftl_band *band, size_t num_blocks, ftl_io_fn cb)
{
	struct ftl_io *io;

	io = calloc(1, sizeof(struct ftl_io));
	SPDK_CU_ASSERT_FATAL(io != NULL);

	io->dev = band->dev;
	io->band = band;
	io->cb_fn = cb;
	io->num_blocks = 1;

	return io;
}
/* Test double: advance the io cursor with no bounds checking. */
void
ftl_io_advance(struct ftl_io *io, size_t num_blocks)
{
	io->pos += num_blocks;
}
/* Test double: fire the completion callback with status 0 and free the io. */
void
ftl_io_complete(struct ftl_io *io)
{
	io->cb_fn(io, NULL, 0);
	free(io);
}
/*
 * Create a device whose bands are all FREE (via CLOSED -> FREE transition)
 * and give its io channel a minimal inner channel so wptr code can use it.
 */
static void
setup_wptr_test(struct spdk_ftl_dev **dev, const struct base_bdev_geometry *geo)
{
	struct spdk_ftl_dev *t_dev;
	struct _ftl_io_channel *_ioch;
	size_t i;

	t_dev = test_init_ftl_dev(geo);
	for (i = 0; i < ftl_get_num_bands(t_dev); ++i) {
		test_init_ftl_band(t_dev, i, geo->zone_size);
		t_dev->bands[i].state = FTL_BAND_STATE_CLOSED;
		ftl_band_set_state(&t_dev->bands[i], FTL_BAND_STATE_FREE);
	}

	/* The inner io channel lives directly after the outer one. */
	_ioch = (struct _ftl_io_channel *)(t_dev->ioch + 1);
	_ioch->ioch = calloc(1, sizeof(*_ioch->ioch));
	SPDK_CU_ASSERT_FATAL(_ioch->ioch != NULL);

	*dev = t_dev;
}
/* Tear down everything created by setup_wptr_test(). */
static void
cleanup_wptr_test(struct spdk_ftl_dev *dev)
{
	struct _ftl_io_channel *_ioch;
	size_t i;

	for (i = 0; i < ftl_get_num_bands(dev); ++i) {
		/* Segments were never allocated for these bands. */
		dev->bands[i].lba_map.segments = NULL;
		test_free_ftl_band(&dev->bands[i]);
	}

	_ioch = (struct _ftl_io_channel *)(dev->ioch + 1);
	free(_ioch->ioch);

	test_free_ftl_dev(dev);
}
/* Exercise the write-pointer lifecycle over every band of the device:
 * open a band, advance the wptr across all of its zones in xfer_size steps,
 * verify the band fills and closes, and check that a new wptr can only be
 * allocated while free bands remain. The assertions depend on the exact
 * order of the state transitions below. */
static void
test_wptr(void)
{
struct spdk_ftl_dev *dev;
struct ftl_wptr *wptr;
struct ftl_band *band;
struct ftl_io io = { 0 };
size_t xfer_size;
size_t zone, block, offset, i;
int rc;
setup_wptr_test(&dev, &g_geo);
xfer_size = dev->xfer_size;
ftl_add_wptr(dev);
for (i = 0; i < ftl_get_num_bands(dev); ++i) {
wptr = LIST_FIRST(&dev->wptr_list);
band = wptr->band;
/* Bands must pass through OPENING before they can be OPEN. */
ftl_band_set_state(band, FTL_BAND_STATE_OPENING);
ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
io.band = band;
io.dev = dev;
/* Advance the wptr through the whole band; its offset must track the
 * cumulative number of blocks written across all zones. */
for (block = 0, offset = 0; block < ftl_get_num_blocks_in_zone(dev) / xfer_size; ++block) {
for (zone = 0; zone < band->num_zones; ++zone) {
CU_ASSERT_EQUAL(wptr->offset, offset);
ftl_wptr_advance(wptr, xfer_size);
offset += xfer_size;
}
}
/* A fully written band transitions to FULL automatically. */
CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_FULL);
ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
/* Call the metadata completion cb to force band state change */
/* and removal of the actual wptr */
ftl_md_write_cb(&io, NULL, 0);
CU_ASSERT_EQUAL(band->state, FTL_BAND_STATE_CLOSED);
CU_ASSERT_TRUE(LIST_EMPTY(&dev->wptr_list));
rc = ftl_add_wptr(dev);
/* There are no free bands during the last iteration, so */
/* there'll be no new wptr allocation */
if (i == (ftl_get_num_bands(dev) - 1)) {
CU_ASSERT_EQUAL(rc, -1);
} else {
CU_ASSERT_EQUAL(rc, 0);
}
}
cleanup_wptr_test(dev);
}
/* Register the wptr suite with CUnit, run it verbosely, and report the
 * number of failed assertions as the process exit status. */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int failures;

	/* Abort immediately on framework errors (e.g. registry failures). */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("ftl_wptr_suite", NULL, NULL);
	CU_ADD_TEST(suite, test_wptr);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return failures;
}

View File

@ -44,15 +44,6 @@ function unittest_event() {
$valgrind $testdir/lib/event/reactor.c/reactor_ut
}
# Run every FTL unit-test binary (under valgrind when enabled), in the same
# order as before.
function unittest_ftl() {
	local ut

	for ut in \
		ftl_ppa/ftl_ppa_ut \
		ftl_band.c/ftl_band_ut \
		ftl_reloc.c/ftl_reloc_ut \
		ftl_wptr/ftl_wptr_ut \
		ftl_md/ftl_md_ut \
		ftl_io.c/ftl_io_ut; do
		$valgrind $testdir/lib/ftl/$ut
	done
}
function unittest_iscsi() {
$valgrind $testdir/lib/iscsi/conn.c/conn_ut
$valgrind $testdir/lib/iscsi/param.c/param_ut
@ -215,9 +206,6 @@ fi
run_test "unittest_blob_blobfs" unittest_blob
run_test "unittest_event" unittest_event
if [ $(uname -s) = Linux ]; then
run_test "unittest_ftl" unittest_ftl
fi
run_test "unittest_accel" $valgrind $testdir/lib/accel/accel.c/accel_engine_ut
run_test "unittest_ioat" $valgrind $testdir/lib/ioat/ioat.c/ioat_ut