FTL: Initial nv cache structure

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: Ie40cc25ed9bf28976a5ae6d6a67491f438152fca
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13317
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Authored by Kozlowski Mateusz on 2022-06-13 11:54:12 +02:00, committed by Tomasz Zawadzki
commit a68a12a478
parent ae6a35256b
20 changed files with 594 additions and 53 deletions

View File

@@ -22,6 +22,7 @@ CFLAGS += -I.
 FTL_SUBDIRS := mngt utils

 C_SRCS = ftl_core.c ftl_init.c ftl_layout.c ftl_debug.c ftl_io.c ftl_sb.c ftl_l2p.c ftl_l2p_flat.c
+C_SRCS += ftl_nv_cache.c
 C_SRCS += mngt/ftl_mngt.c mngt/ftl_mngt_bdev.c mngt/ftl_mngt_shutdown.c mngt/ftl_mngt_startup.c
 C_SRCS += mngt/ftl_mngt_md.c mngt/ftl_mngt_misc.c mngt/ftl_mngt_ioch.c mngt/ftl_mngt_l2p.c
 C_SRCS += utils/ftl_conf.c utils/ftl_md.c utils/ftl_mempool.c

View File

@@ -17,7 +17,6 @@
 #include "ftl_debug.h"
 #include "ftl_internal.h"
 #include "mngt/ftl_mngt.h"
-#include "utils/ftl_mempool.h"

 size_t
@@ -33,6 +32,15 @@ ftl_shutdown_complete(struct spdk_ftl_dev *dev)
 		return false;
 	}

+	if (!ftl_nv_cache_is_halted(&dev->nv_cache)) {
+		ftl_nv_cache_halt(&dev->nv_cache);
+		return false;
+	}
+
+	if (!ftl_nv_cache_chunks_busy(&dev->nv_cache)) {
+		return false;
+	}
+
 	if (!ftl_l2p_is_halted(dev)) {
 		ftl_l2p_halt(dev);
 		return false;
@@ -141,6 +149,7 @@ ftl_core_poller(void *ctx)
 	}

 	ftl_process_io_queue(dev);
+	ftl_nv_cache_process(dev);
 	ftl_l2p_process(dev);

 	if (io_activity_total_old != dev->io_activity_total) {

View File

@@ -18,6 +18,7 @@
 #include "ftl_internal.h"
 #include "ftl_io.h"
+#include "ftl_nv_cache.h"
 #include "ftl_layout.h"
 #include "ftl_sb.h"
 #include "ftl_l2p.h"
@@ -46,12 +47,6 @@ struct spdk_ftl_dev {
 	/* Underlying device */
 	struct spdk_bdev_desc *base_bdev_desc;

-	/* Cache device */
-	struct spdk_bdev_desc *cache_bdev_desc;
-
-	/* Cache VSS metadata size */
-	uint64_t cache_md_size;
-
 	/* Cached properties of the underlying device */
 	uint64_t num_blocks_in_band;
 	uint64_t num_zones_in_band;
@@ -70,6 +65,9 @@ struct spdk_ftl_dev {
 	/* Management process to be continued after IO device unregistration completes */
 	struct ftl_mngt_process *unregister_process;

+	/* Non-volatile write buffer cache */
+	struct ftl_nv_cache nv_cache;
+
 	/* counters for poller busy, include
 	   1. nv cache read/write
 	   2. metadata read/write
@@ -111,9 +109,6 @@ struct spdk_ftl_dev {
 	/* Underlying device IO channel */
 	struct spdk_io_channel *base_ioch;

-	/* Cache IO channel */
-	struct spdk_io_channel *cache_ioch;
-
 	/* Poller */
 	struct spdk_poller *core_poller;

View File

@@ -18,6 +18,7 @@
 #include "ftl_core.h"
 #include "ftl_io.h"
 #include "ftl_debug.h"
+#include "ftl_nv_cache.h"
 #include "ftl_utils.h"
 #include "mngt/ftl_mngt.h"

View File

@@ -27,4 +27,33 @@ typedef uint64_t ftl_addr;
 /* Number of LBAs that could be stored in a single block */
 #define FTL_NUM_LBA_IN_BLOCK (FTL_BLOCK_SIZE / sizeof(uint64_t))

+/*
+ * Mapping of physical (actual location on disk) to logical (user's POV) addresses. Used in two main scenarios:
+ * - During relocation, FTL needs to pin L2P pages (this allows checking which pages to pin) and move still-valid blocks
+ *   (the valid map allows preliminary elimination of invalid physical blocks, but user data could invalidate a location
+ *   during a read/write operation, so the actual comparison against the L2P still needs to be done)
+ * - After a dirty shutdown the state of the L2P is unknown and needs to be rebuilt - this is done by applying all P2L maps,
+ *   taking into account the ordering of user writes
+ */
+struct ftl_p2l_map {
+	/* Number of valid LBAs */
+	size_t num_valid;
+
+	/* P2L map's reference count, prevents premature release of resources during dirty shutdown recovery for open bands */
+	size_t ref_cnt;
+
+	/* P2L map (only valid for open/relocating bands) */
+	union {
+		uint64_t *band_map;
+		void *chunk_map;
+	};
+
+	/* DMA buffer for region's metadata entry */
+	union {
+		struct ftl_band_md *band_dma_md;
+		struct ftl_nv_cache_chunk_md *chunk_dma_md;
+	};
+};
+
 #endif /* FTL_INTERNAL_H */
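A quick illustration of the relocation scenario described in the comment above: band_map is indexed by a block's position within the band and stores the LBA written there, and validity is always confirmed by re-checking the L2P. This is a hedged sketch, not part of the patch; ftl_l2p_get() and FTL_LBA_INVALID are assumed to exist in the surrounding FTL code with these meanings.

/* Sketch only: count entries of a band's P2L map that still match the L2P.
 * Assumes an ftl_l2p_get()-style lookup and an FTL_LBA_INVALID marker. */
static size_t
p2l_map_count_still_valid(struct spdk_ftl_dev *dev, struct ftl_p2l_map *map,
			  ftl_addr band_start, uint64_t num_blocks)
{
	size_t still_valid = 0;
	uint64_t i;

	for (i = 0; i < num_blocks; i++) {
		uint64_t lba = map->band_map[i];

		if (lba == FTL_LBA_INVALID) {
			continue;
		}

		/* A user write may have moved the LBA in the meantime, so the
		 * final word always belongs to the L2P. */
		if (ftl_l2p_get(dev, lba) == band_start + i) {
			still_valid++;
		}
	}

	return still_valid;
}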

View File

@@ -11,7 +11,6 @@
 #include "ftl_io.h"
 #include "ftl_core.h"
 #include "ftl_debug.h"
-#include "utils/ftl_mempool.h"

 void
 ftl_io_inc_req(struct ftl_io *io)

View File

@@ -4,6 +4,7 @@
  */

 #include "ftl_l2p.h"
+#include "ftl_nv_cache.h"
 #include "ftl_l2p_flat.h"
 #include "ftl_core.h"

View File

@@ -8,8 +8,13 @@
 #include "ftl_core.h"
 #include "ftl_utils.h"
 #include "ftl_layout.h"
+#include "ftl_nv_cache.h"
 #include "ftl_sb.h"

+#define FTL_NV_CACHE_CHUNK_DATA_SIZE(blocks) ((uint64_t)blocks * FTL_BLOCK_SIZE)
+#define FTL_NV_CACHE_CHUNK_SIZE(blocks) \
+	(FTL_NV_CACHE_CHUNK_DATA_SIZE(blocks) + (2 * FTL_NV_CACHE_CHUNK_MD_SIZE))
+
 static inline float
 blocks2mib(uint64_t blocks)
 {
@@ -105,9 +110,9 @@ get_num_user_lbas(struct spdk_ftl_dev *dev)
 static void
 set_region_bdev_nvc(struct ftl_layout_region *reg, struct spdk_ftl_dev *dev)
 {
-	reg->bdev_desc = dev->cache_bdev_desc;
-	reg->ioch = dev->cache_ioch;
-	reg->vss_blksz = dev->cache_md_size;
+	reg->bdev_desc = dev->nv_cache.bdev_desc;
+	reg->ioch = dev->nv_cache.cache_ioch;
+	reg->vss_blksz = dev->nv_cache.md_size;
 }

 static void
@@ -121,9 +126,9 @@ set_region_bdev_btm(struct ftl_layout_region *reg, struct spdk_ftl_dev *dev)
 static int
 setup_layout_nvc(struct spdk_ftl_dev *dev)
 {
-	uint64_t offset = 0;
+	uint64_t left, offset = 0;
 	struct ftl_layout *layout = &dev->layout;
-	struct ftl_layout_region *region;
+	struct ftl_layout_region *region, *mirror;

 #ifdef SPDK_FTL_VSS_EMU
 	/* Skip the already init`d VSS region */
@@ -157,15 +162,69 @@ setup_layout_nvc(struct spdk_ftl_dev *dev)
 		goto error;
 	}

+	/*
+	 * Initialize NV Cache metadata
+	 */
+	if (offset >= layout->nvc.total_blocks) {
+		goto error;
+	}
+	left = layout->nvc.total_blocks - offset;
+	layout->nvc.chunk_data_blocks =
+		FTL_NV_CACHE_CHUNK_DATA_SIZE(ftl_get_num_blocks_in_zone(dev)) / FTL_BLOCK_SIZE;
+	layout->nvc.chunk_meta_size = FTL_NV_CACHE_CHUNK_MD_SIZE;
+	layout->nvc.chunk_count = (left * FTL_BLOCK_SIZE) /
+				  FTL_NV_CACHE_CHUNK_SIZE(ftl_get_num_blocks_in_zone(dev));
+	layout->nvc.chunk_tail_md_num_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(&dev->nv_cache);
+
+	if (0 == layout->nvc.chunk_count) {
+		goto error;
+	}
+
+	region = &layout->region[FTL_LAYOUT_REGION_TYPE_NVC_MD];
+	region->type = FTL_LAYOUT_REGION_TYPE_NVC_MD;
+	region->mirror_type = FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR;
+	region->name = "nvc_md";
+	region->current.version = region->prev.version = FTL_NVC_VERSION_CURRENT;
+	region->current.offset = offset;
+	region->current.blocks = blocks_region(layout->nvc.chunk_count *
+					       sizeof(struct ftl_nv_cache_chunk_md));
+	region->entry_size = sizeof(struct ftl_nv_cache_chunk_md) / FTL_BLOCK_SIZE;
+	region->num_entries = layout->nvc.chunk_count;
+	set_region_bdev_nvc(region, dev);
+	offset += region->current.blocks;
+
+	/*
+	 * Initialize NV Cache metadata mirror
+	 */
+	mirror = &layout->region[FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR];
+	*mirror = *region;
+	mirror->type = FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR;
+	mirror->mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
+	mirror->name = "nvc_md_mirror";
+	mirror->current.offset += region->current.blocks;
+	offset += mirror->current.blocks;
+
+	/*
+	 * Initialize data region on NV cache
+	 */
+	if (offset >= layout->nvc.total_blocks) {
+		goto error;
+	}
+
 	region = &layout->region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
 	region->type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
 	region->name = "data_nvc";
 	region->current.version = region->prev.version = 0;
 	region->current.offset = offset;
-	region->current.blocks = layout->nvc.total_blocks - offset;
+	region->current.blocks = layout->nvc.chunk_count * layout->nvc.chunk_data_blocks;
 	set_region_bdev_nvc(region, dev);
 	offset += region->current.blocks;

+	left = layout->nvc.total_blocks - offset;
+	if (left > layout->nvc.chunk_data_blocks) {
+		FTL_ERRLOG(dev, "Error when setup NV cache layout\n");
+		return -1;
+	}
+
 	if (offset > layout->nvc.total_blocks) {
 		FTL_ERRLOG(dev, "Error when setup NV cache layout\n");
 		goto error;
@@ -241,7 +300,7 @@ ftl_layout_setup(struct spdk_ftl_dev *dev)
 	bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
 	layout->base.total_blocks = spdk_bdev_get_num_blocks(bdev);

-	bdev = spdk_bdev_desc_get_bdev(dev->cache_bdev_desc);
+	bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
 	layout->nvc.total_blocks = spdk_bdev_get_num_blocks(bdev);

 	/* Initialize mirrors types */
@@ -310,13 +369,13 @@ ftl_layout_setup_vss_emu(struct spdk_ftl_dev *dev)
 	region->current.version = region->prev.version = 0;
 	region->current.offset = 0;

-	bdev = spdk_bdev_desc_get_bdev(dev->cache_bdev_desc);
+	bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
 	layout->nvc.total_blocks = spdk_bdev_get_num_blocks(bdev);
-	region->current.blocks = blocks_region(dev->cache_md_size * layout->nvc.total_blocks);
+	region->current.blocks = blocks_region(dev->nv_cache.md_size * layout->nvc.total_blocks);
 	region->vss_blksz = 0;
-	region->bdev_desc = dev->cache_bdev_desc;
-	region->ioch = dev->cache_ioch;
+	region->bdev_desc = dev->nv_cache.bdev_desc;
+	region->ioch = dev->nv_cache.cache_ioch;

 	assert(region->bdev_desc != NULL);
 	assert(region->ioch != NULL);
@@ -350,8 +409,8 @@ ftl_layout_setup_superblock(struct spdk_ftl_dev *dev)
 	region->current.blocks = blocks_region(FTL_SUPERBLOCK_SIZE);
 	region->vss_blksz = 0;
-	region->bdev_desc = dev->cache_bdev_desc;
-	region->ioch = dev->cache_ioch;
+	region->bdev_desc = dev->nv_cache.bdev_desc;
+	region->ioch = dev->nv_cache.cache_ioch;

 	assert(region->bdev_desc != NULL);
 	assert(region->ioch != NULL);
@@ -389,7 +448,7 @@ ftl_layout_dump(struct spdk_ftl_dev *dev)
 	int i;

 	FTL_NOTICELOG(dev, "NV cache layout:\n");
 	for (i = 0; i < FTL_LAYOUT_REGION_TYPE_MAX; ++i) {
-		if (layout->region[i].bdev_desc == dev->cache_bdev_desc) {
+		if (layout->region[i].bdev_desc == dev->nv_cache.bdev_desc) {
 			dump_region(dev, &layout->region[i]);
 		}
 	}
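To make the chunk sizing in setup_layout_nvc() above concrete, here is a small standalone sketch of the same arithmetic with made-up inputs (4 KiB blocks, one block of chunk metadata, an assumed 1024-block zone, and 1,000,000 cache blocks left after the fixed regions are carved out). The numbers are illustrative, not values fixed by this patch.

/* Rough worked example of the chunk layout math above, with assumed inputs. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t block_size = 4096;
	const uint64_t chunk_md_size = 4096;	/* one block per chunk MD entry */
	const uint64_t zone_blocks = 1024;	/* assumed per-device zone size */
	const uint64_t left = 1000000;		/* blocks available for chunks */

	uint64_t chunk_data_size = zone_blocks * block_size;		/* 4 MiB of data */
	uint64_t chunk_size = chunk_data_size + 2 * chunk_md_size;	/* data + two MD entries */
	uint64_t chunk_count = (left * block_size) / chunk_size;

	printf("chunk_count = %llu\n", (unsigned long long)chunk_count);	/* 974 chunks */
	return 0;
}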

View File

@@ -23,6 +23,11 @@ enum ftl_layout_region_type {
 	/* If using cached L2P, this region stores the serialized instance of it */
 	FTL_LAYOUT_REGION_TYPE_L2P,

+	/* State of chunks */
+	FTL_LAYOUT_REGION_TYPE_NVC_MD,
+
+	/* Mirrored instance of the state of chunks */
+	FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR,
+
 	/* User data region on the nv cache device */
 	FTL_LAYOUT_REGION_TYPE_DATA_NVC,
@@ -99,6 +104,10 @@ struct ftl_layout {
 	/* Organization for NV cache */
 	struct {
 		uint64_t total_blocks;
+		uint64_t chunk_data_blocks;
+		uint64_t chunk_meta_size;
+		uint64_t chunk_count;
+		uint64_t chunk_tail_md_num_blocks;
 	} nvc;

 	/* Information corresponding to L2P */

lib/ftl/ftl_nv_cache.c (new file, 249 lines)
View File

@@ -0,0 +1,249 @@
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/ftl.h"
#include "spdk/string.h"

#include "ftl_nv_cache.h"
#include "ftl_nv_cache_io.h"
#include "ftl_core.h"
#include "utils/ftl_addr_utils.h"
#include "mngt/ftl_mngt.h"

static inline const struct ftl_layout_region *
nvc_data_region(struct ftl_nv_cache *nv_cache)
{
	struct spdk_ftl_dev *dev;

	dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);

	return &dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
}

static inline void
nvc_validate_md(struct ftl_nv_cache *nv_cache,
		struct ftl_nv_cache_chunk_md *chunk_md)
{
	struct ftl_md *md = nv_cache->md;
	void *buffer = ftl_md_get_buffer(md);
	uint64_t size = ftl_md_get_buffer_size(md);
	void *ptr = chunk_md;

	if (ptr < buffer) {
		ftl_abort();
	}

	ptr += sizeof(*chunk_md);
	if (ptr > buffer + size) {
		ftl_abort();
	}
}

static inline uint64_t
nvc_data_offset(struct ftl_nv_cache *nv_cache)
{
	return nvc_data_region(nv_cache)->current.offset;
}

static inline uint64_t
nvc_data_blocks(struct ftl_nv_cache *nv_cache)
{
	return nvc_data_region(nv_cache)->current.blocks;
}

size_t
ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
				   struct spdk_ftl_dev, nv_cache);
	return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
				    FTL_BLOCK_SIZE);
}

static size_t
nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
{
	/* Map pool element holds the whole tail md */
	return ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache) * FTL_BLOCK_SIZE;
}

int
ftl_nv_cache_init(struct spdk_ftl_dev *dev)
{
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	struct ftl_nv_cache_chunk *chunk;
	struct ftl_nv_cache_chunk_md *md;
	uint64_t i, offset;

	nv_cache->halt = true;

	nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	if (!nv_cache->md) {
		FTL_ERRLOG(dev, "No NV cache metadata object\n");
		return -1;
	}

	nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
					       nv_cache->md_size * dev->xfer_size,
					       FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
	if (!nv_cache->md_pool) {
		FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
		return -1;
	}

	/*
	 * Initialize chunk info
	 */
	nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
	nv_cache->chunk_count = dev->layout.nvc.chunk_count;

	/* Allocate chunks */
	nv_cache->chunks = calloc(nv_cache->chunk_count,
				  sizeof(nv_cache->chunks[0]));
	if (!nv_cache->chunks) {
		FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
		return -1;
	}

	TAILQ_INIT(&nv_cache->chunk_free_list);
	TAILQ_INIT(&nv_cache->chunk_open_list);
	TAILQ_INIT(&nv_cache->chunk_full_list);

	/* First chunk metadata */
	md = ftl_md_get_buffer(nv_cache->md);
	if (!md) {
		FTL_ERRLOG(dev, "No NV cache metadata\n");
		return -1;
	}

	nv_cache->chunk_free_count = nv_cache->chunk_count;

	chunk = nv_cache->chunks;
	offset = nvc_data_offset(nv_cache);
	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
		chunk->nv_cache = nv_cache;
		chunk->md = md;
		nvc_validate_md(nv_cache, md);
		chunk->offset = offset;
		offset += nv_cache->chunk_blocks;
		TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
	}
	assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));

#define FTL_MAX_OPEN_CHUNKS 2
	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
						nv_cache_p2l_map_pool_elem_size(nv_cache),
						FTL_BLOCK_SIZE,
						SPDK_ENV_SOCKET_ID_ANY);
	if (!nv_cache->p2l_pool) {
		return -ENOMEM;
	}

	/* One entry per open chunk */
	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
			      sizeof(struct ftl_nv_cache_chunk_md),
			      FTL_BLOCK_SIZE,
			      SPDK_ENV_SOCKET_ID_ANY);
	if (!nv_cache->chunk_md_pool) {
		return -ENOMEM;
	}

	return 0;
}

void
ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
{
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;

	ftl_mempool_destroy(nv_cache->md_pool);
	ftl_mempool_destroy(nv_cache->p2l_pool);
	ftl_mempool_destroy(nv_cache->chunk_md_pool);
	nv_cache->md_pool = NULL;
	nv_cache->p2l_pool = NULL;
	nv_cache->chunk_md_pool = NULL;

	free(nv_cache->chunks);
	nv_cache->chunks = NULL;
}

void
ftl_nv_cache_fill_md(struct ftl_io *io)
{
	uint64_t i;
	union ftl_md_vss *metadata = io->md;
	uint64_t lba = ftl_io_get_lba(io, 0);

	for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
		metadata->nv_cache.lba = lba;
	}
}

uint64_t
chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
{
	return nv_cache->chunk_blocks - ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
}

int
ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
		  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	int rc;
	struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;

	assert(ftl_addr_in_nvc(io->dev, addr));

	rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
			ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
			num_blocks, cb, cb_arg);

	return rc;
}

bool
ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
{
	if (nv_cache->chunk_open_count > 0) {
		return false;
	}

	return true;
}

void
ftl_nv_cache_process(struct spdk_ftl_dev *dev)
{
	if (!dev->nv_cache.bdev_desc) {
		return;
	}
}

bool
ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
{
	if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
		return true;
	} else {
		return false;
	}
}

int
ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
{
	/* chunk_current is migrating to closed status when closing, any others should already be
	 * moved to free chunk list. */
	return nv_cache->chunk_open_count == 0;
}

void
ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
{
	nv_cache->halt = true;
}
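ftl_nv_cache_chunk_tail_md_num_blocks() above reserves one L2P address per data block of the chunk and rounds the result up to whole blocks. A standalone sketch of that calculation with assumed values (1024 data blocks per chunk, 8-byte addresses, 4 KiB blocks; none of these are fixed by the patch):

/* Worked example of the tail-MD sizing above, with assumed inputs. */
#include <stdint.h>
#include <stdio.h>

static uint64_t
div_round_up(uint64_t num, uint64_t divisor)
{
	return (num + divisor - 1) / divisor;
}

int
main(void)
{
	const uint64_t chunk_data_blocks = 1024;	/* assumed, matches one zone */
	const uint64_t addr_size = 8;			/* bytes per P2L entry, assumed */
	const uint64_t block_size = 4096;

	/* 1024 entries * 8 B = 8192 B -> 2 blocks of tail metadata per chunk */
	uint64_t tail_md_blocks = div_round_up(chunk_data_blocks * addr_size, block_size);

	printf("tail_md_blocks = %llu\n", (unsigned long long)tail_md_blocks);	/* 2 */
	return 0;
}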

lib/ftl/ftl_nv_cache.h (new file, 155 lines)
View File

@@ -0,0 +1,155 @@
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 */

#ifndef FTL_NV_CACHE_H
#define FTL_NV_CACHE_H

#include "spdk/stdinc.h"
#include "spdk/crc32.h"

#include "ftl_io.h"
#include "ftl_utils.h"

#define FTL_NVC_VERSION_0	0
#define FTL_NVC_VERSION_1	1

#define FTL_NVC_VERSION_CURRENT	FTL_NVC_VERSION_1

struct ftl_nvcache_restore;
typedef void (*ftl_nv_cache_restore_fn)(struct ftl_nvcache_restore *, int, void *cb_arg);

enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
	FTL_CHUNK_STATE_OPEN,
	FTL_CHUNK_STATE_CLOSED,
	FTL_CHUNK_STATE_MAX
};

struct ftl_nv_cache_chunk_md {
	/* Current lba to write */
	uint32_t write_pointer;

	/* Number of blocks written */
	uint32_t blocks_written;

	/* Number of skipped blocks (case when IO size is greater than blocks left in chunk) */
	uint32_t blocks_skipped;

	/* Next block to be compacted */
	uint32_t read_pointer;

	/* Number of compacted (both valid and invalid) blocks */
	uint32_t blocks_compacted;

	/* Chunk state */
	enum ftl_chunk_state state;

	/* CRC32 checksum of the associated P2L map when chunk is in closed state */
	uint32_t p2l_map_checksum;
} __attribute__((aligned(FTL_BLOCK_SIZE)));

#define FTL_NV_CACHE_CHUNK_MD_SIZE sizeof(struct ftl_nv_cache_chunk_md)
SPDK_STATIC_ASSERT(FTL_NV_CACHE_CHUNK_MD_SIZE == FTL_BLOCK_SIZE,
		   "FTL NV Chunk metadata size is invalid");

struct ftl_nv_cache_chunk {
	struct ftl_nv_cache *nv_cache;

	struct ftl_nv_cache_chunk_md *md;

	/* Offset from start lba of the cache */
	uint64_t offset;

	/* P2L map */
	struct ftl_p2l_map p2l_map;

	/* Metadata request */
	struct ftl_basic_rq metadata_rq;

	TAILQ_ENTRY(ftl_nv_cache_chunk) entry;

	/* This flag is used to indicate chunk is used in recovery */
	bool recovery;

	/* For writing metadata */
	struct ftl_md_io_entry_ctx md_persist_entry_ctx;
};

struct ftl_nv_cache {
	/* Flag indicating halt request */
	bool halt;

	/* Write buffer cache bdev */
	struct spdk_bdev_desc *bdev_desc;

	/* Persistent cache IO channel */
	struct spdk_io_channel *cache_ioch;

	/* Metadata pool */
	struct ftl_mempool *md_pool;

	/* P2L map memory pool */
	struct ftl_mempool *p2l_pool;

	/* Chunk md memory pool */
	struct ftl_mempool *chunk_md_pool;

	/* Block Metadata size */
	uint64_t md_size;

	/* NV cache metadata object handle */
	struct ftl_md *md;

	/* Number of blocks in chunk */
	uint64_t chunk_blocks;

	/* Number of chunks */
	uint64_t chunk_count;

	/* Current processed chunk */
	struct ftl_nv_cache_chunk *chunk_current;

	/* Free chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_free_list;
	uint64_t chunk_free_count;

	/* Open chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_open_list;
	uint64_t chunk_open_count;

	/* Full chunks list */
	TAILQ_HEAD(, ftl_nv_cache_chunk) chunk_full_list;
	uint64_t chunk_full_count;

	struct ftl_nv_cache_chunk *chunks;
};

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
void ftl_nv_cache_fill_md(struct ftl_io *io);
int ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
		      spdk_bdev_io_completion_cb cb, void *cb_arg);
bool ftl_nv_cache_full(struct ftl_nv_cache *nv_cache);
void ftl_nv_cache_process(struct spdk_ftl_dev *dev);

void ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache);

static inline void
ftl_nv_cache_resume(struct ftl_nv_cache *nv_cache)
{
	nv_cache->halt = false;
}

bool ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache);

size_t ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache);

uint64_t chunk_tail_md_offset(struct ftl_nv_cache *nv_cache);

typedef int (*ftl_chunk_md_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx);

#endif /* FTL_NV_CACHE_H */
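The chunk state machine and the free/open/full lists declared above are only initialized in this patch; later changes are expected to move chunks between them. A hypothetical sketch of taking a chunk from the free list to the open list (the helper is not part of this change; locking, metadata persistence, and P2L map setup are deliberately omitted):

/* Hypothetical helper, not part of this patch: pop a chunk off the free list,
 * mark it open, and account for it on the open list. */
static struct ftl_nv_cache_chunk *
chunk_open_sketch(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);

	if (!chunk) {
		return NULL;
	}

	TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
	nv_cache->chunk_free_count--;

	chunk->md->state = FTL_CHUNK_STATE_OPEN;
	chunk->md->write_pointer = 0;

	TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
	nv_cache->chunk_open_count++;

	return chunk;
}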

View File

@@ -99,7 +99,7 @@ ftl_nv_cache_bdev_readv_blocks_with_md(struct spdk_ftl_dev *dev,
 		uint64_t offset_blocks, uint64_t num_blocks,
 		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	assert(desc == dev->cache_bdev_desc);
+	assert(desc == dev->nv_cache.bdev_desc);
 	ftl_nv_cache_bdev_get_md(dev, offset_blocks, num_blocks, md);
 	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks,
 				      num_blocks, cb, cb_arg);
@@ -138,7 +138,7 @@ ftl_nv_cache_bdev_writev_blocks_with_md(struct spdk_ftl_dev *dev,
 		uint64_t offset_blocks, uint64_t num_blocks,
 		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	assert(desc == dev->cache_bdev_desc);
+	assert(desc == dev->nv_cache.bdev_desc);
 	ftl_nv_cache_bdev_set_md(dev, offset_blocks, num_blocks, md_buf);
 	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt,
 				       offset_blocks, num_blocks,
@@ -153,7 +153,7 @@ ftl_nv_cache_bdev_read_blocks_with_md(struct spdk_ftl_dev *dev,
 		uint64_t offset_blocks, uint64_t num_blocks,
 		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	assert(desc == dev->cache_bdev_desc);
+	assert(desc == dev->nv_cache.bdev_desc);
 	ftl_nv_cache_bdev_get_md(dev, offset_blocks, num_blocks, md);
 	return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks,
 				     num_blocks, cb, cb_arg);
@@ -167,7 +167,7 @@ ftl_nv_cache_bdev_write_blocks_with_md(struct spdk_ftl_dev *dev,
 		uint64_t offset_blocks, uint64_t num_blocks,
 		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	assert(desc == dev->cache_bdev_desc);
+	assert(desc == dev->nv_cache.bdev_desc);
 	ftl_nv_cache_bdev_set_md(dev, offset_blocks, num_blocks, md);
 	return spdk_bdev_write_blocks(desc, ch, buf,
 				      offset_blocks, num_blocks,

View File

@@ -6,6 +6,7 @@
 #include "spdk/bdev_module.h"
 #include "spdk/ftl.h"

+#include "ftl_nv_cache.h"
 #include "ftl_internal.h"
 #include "ftl_mngt_steps.h"
 #include "ftl_internal.h"
@@ -171,20 +172,21 @@ void
 ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
 {
 	struct spdk_bdev *bdev;
+	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
 	const char *bdev_name = dev->conf.cache_bdev;

 	if (spdk_bdev_open_ext(bdev_name, true, nv_cache_bdev_event_cb, dev,
-			       &dev->cache_bdev_desc)) {
+			       &nv_cache->bdev_desc)) {
 		FTL_ERRLOG(dev, "Unable to open bdev: %s\n", bdev_name);
 		goto error;
 	}

-	bdev = spdk_bdev_desc_get_bdev(dev->cache_bdev_desc);
+	bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);

-	if (spdk_bdev_module_claim_bdev(bdev, dev->cache_bdev_desc, &g_ftl_bdev_module)) {
+	if (spdk_bdev_module_claim_bdev(bdev, nv_cache->bdev_desc, &g_ftl_bdev_module)) {
 		/* clear the desc so that we don't try to release the claim on cleanup */
-		spdk_bdev_close(dev->cache_bdev_desc);
-		dev->cache_bdev_desc = NULL;
+		spdk_bdev_close(nv_cache->bdev_desc);
+		nv_cache->bdev_desc = NULL;
 		FTL_ERRLOG(dev, "Unable to claim bdev %s\n", bdev_name);
 		goto error;
 	}
@@ -197,8 +199,8 @@ ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
 		goto error;
 	}

-	dev->cache_ioch = spdk_bdev_get_io_channel(dev->cache_bdev_desc);
-	if (!dev->cache_ioch) {
+	nv_cache->cache_ioch = spdk_bdev_get_io_channel(nv_cache->bdev_desc);
+	if (!nv_cache->cache_ioch) {
 		FTL_ERRLOG(dev, "Failed to create cache IO channel for NV Cache\n");
 		goto error;
 	}
@@ -210,8 +212,8 @@ ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
 		goto error;
 	}

-	dev->cache_md_size = spdk_bdev_get_md_size(bdev);
-	if (dev->cache_md_size != sizeof(union ftl_md_vss)) {
+	nv_cache->md_size = spdk_bdev_get_md_size(bdev);
+	if (nv_cache->md_size != sizeof(union ftl_md_vss)) {
 		FTL_ERRLOG(dev, "Bdev's %s metadata is invalid size (%"PRIu32")\n",
 			   spdk_bdev_get_name(bdev), spdk_bdev_get_md_size(bdev));
 		goto error;
@@ -229,7 +231,7 @@ ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
 		goto error;
 	}

-	if (ftl_md_xfer_blocks(dev) * dev->cache_md_size > FTL_ZERO_BUFFER_SIZE) {
+	if (ftl_md_xfer_blocks(dev) * nv_cache->md_size > FTL_ZERO_BUFFER_SIZE) {
 		FTL_ERRLOG(dev, "Zero buffer too small for bdev %s metadata transfer\n",
 			   spdk_bdev_get_name(bdev));
 		goto error;
@@ -240,7 +242,7 @@ ftl_mngt_open_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
 		goto error;
 	}

-	dev->cache_md_size = 64;
+	nv_cache->md_size = 64;
 	FTL_NOTICELOG(dev, "FTL uses VSS emulation\n");
 #endif
@@ -253,18 +255,18 @@ error:
 void
 ftl_mngt_close_cache_bdev(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
 {
-	if (dev->cache_ioch) {
-		spdk_put_io_channel(dev->cache_ioch);
-		dev->cache_ioch = NULL;
+	if (dev->nv_cache.cache_ioch) {
+		spdk_put_io_channel(dev->nv_cache.cache_ioch);
+		dev->nv_cache.cache_ioch = NULL;
 	}

-	if (dev->cache_bdev_desc) {
-		struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->cache_bdev_desc);
+	if (dev->nv_cache.bdev_desc) {
+		struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);

 		spdk_bdev_module_release_bdev(bdev);
-		spdk_bdev_close(dev->cache_bdev_desc);
-		dev->cache_bdev_desc = NULL;
+		spdk_bdev_close(dev->nv_cache.bdev_desc);
+		dev->nv_cache.bdev_desc = NULL;
 	}

 	ftl_mngt_next_step(mngt);

View File

@@ -8,7 +8,6 @@
 #include "ftl_core.h"
 #include "ftl_mngt.h"
 #include "ftl_mngt_steps.h"
-#include "utils/ftl_mempool.h"

 struct ftl_io_channel_ctx {
 	struct ftl_io_channel *ioch;

View File

@@ -30,10 +30,12 @@ is_buffer_needed(enum ftl_layout_region_type type)
 #ifdef SPDK_FTL_VSS_EMU
 	case FTL_LAYOUT_REGION_TYPE_VSS:
 #endif
 	case FTL_LAYOUT_REGION_TYPE_SB:
 	case FTL_LAYOUT_REGION_TYPE_SB_BASE:
 	case FTL_LAYOUT_REGION_TYPE_DATA_NVC:
 	case FTL_LAYOUT_REGION_TYPE_DATA_BASE:
+	case FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR:
 		return false;

 	default:

View File

@@ -8,6 +8,7 @@
 #include "ftl_mngt.h"
 #include "ftl_mngt_steps.h"
 #include "ftl_internal.h"
+#include "ftl_nv_cache.h"
 #include "ftl_debug.h"

 void
@@ -20,6 +21,25 @@ ftl_mngt_check_conf(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
 	}
 }

+void
+ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
+{
+	if (ftl_nv_cache_init(dev)) {
+		FTL_ERRLOG(dev, "Unable to initialize persistent cache\n");
+		ftl_mngt_fail_step(mngt);
+		return;
+	}
+
+	ftl_mngt_next_step(mngt);
+}
+
+void
+ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
+{
+	ftl_nv_cache_deinit(dev);
+	ftl_mngt_next_step(mngt);
+}
+
 static void
 user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
 {
@@ -59,6 +79,8 @@ ftl_mngt_finalize_startup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mng
 {
 	dev->initialized = 1;

+	ftl_nv_cache_resume(&dev->nv_cache);
+
 	ftl_mngt_next_step(mngt);
 }

View File

@@ -73,6 +73,11 @@ static const struct ftl_mngt_process_desc desc_startup = {
 		.action = ftl_mngt_init_md,
 		.cleanup = ftl_mngt_deinit_md
 	},
+	{
+		.name = "Initialize NV cache",
+		.action = ftl_mngt_init_nv_cache,
+		.cleanup = ftl_mngt_deinit_nv_cache
+	},
 	{
 		.name = "Select startup mode",
 		.action = ftl_mngt_select_startup_mode

View File

@@ -36,6 +36,10 @@ void ftl_mngt_init_io_channel(struct spdk_ftl_dev *dev, struct ftl_mngt_process
 void ftl_mngt_deinit_io_channel(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

+void ftl_mngt_init_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
+
+void ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
+
 void ftl_mngt_init_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);
 void ftl_mngt_deinit_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt);

View File

@@ -260,7 +260,7 @@ read_blocks(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *desc,
 	    uint64_t offset_blocks, uint64_t num_blocks,
 	    spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	if (desc == dev->cache_bdev_desc) {
+	if (desc == dev->nv_cache.bdev_desc) {
 		return ftl_nv_cache_bdev_read_blocks_with_md(dev, desc, ch, buf, md_buf,
 				offset_blocks, num_blocks,
 				cb, cb_arg);
@@ -282,7 +282,7 @@ write_blocks(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *desc,
 	     uint64_t offset_blocks, uint64_t num_blocks,
 	     spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
-	if (desc == dev->cache_bdev_desc) {
+	if (desc == dev->nv_cache.bdev_desc) {
 		return ftl_nv_cache_bdev_write_blocks_with_md(dev, desc, ch, buf, md_buf,
 				offset_blocks, num_blocks,
 				cb, cb_arg);

View File

@@ -131,10 +131,10 @@ setup_device(uint32_t num_threads, uint32_t xfer_size)
 	dev->conf = g_default_conf;
 	dev->xfer_size = xfer_size;
 	dev->base_bdev_desc = (struct spdk_bdev_desc *)0xdeadbeef;
-	dev->cache_bdev_desc = (struct spdk_bdev_desc *)0xdead1234;
+	dev->nv_cache.bdev_desc = (struct spdk_bdev_desc *)0xdead1234;

 	spdk_io_device_register(dev, channel_create_cb, channel_destroy_cb, 0, NULL);
 	spdk_io_device_register(dev->base_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);
-	spdk_io_device_register(dev->cache_bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);
+	spdk_io_device_register(dev->nv_cache.bdev_desc, channel_create_cb, channel_destroy_cb, 0, NULL);

 	TAILQ_INIT(&dev->ioch_queue);
@@ -152,7 +152,7 @@ free_device(struct spdk_ftl_dev *dev)
 	spdk_io_device_unregister(dev, NULL);
 	spdk_io_device_unregister(dev->base_bdev_desc, NULL);
-	spdk_io_device_unregister(dev->cache_bdev_desc, NULL);
+	spdk_io_device_unregister(dev->nv_cache.bdev_desc, NULL);

 	while (!TAILQ_EMPTY(&dev->ioch_queue)) {
 		TAILQ_REMOVE(&dev->ioch_queue, TAILQ_FIRST(&dev->ioch_queue), entry);