lib/ftl: block nv_cache until header is written

After filling the whole non-volatile cache, block all further writes until
the header with metadata is written. This means that the metadata stored on
the device will always be up to date with the most recent write sequence.

Change-Id: I15b724b52814289622374ce77e5c3b23173a75c6
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/458097
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>

parent 2c96745563
commit c69529d452
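
For orientation, below is a minimal, self-contained sketch of the gating scheme this change introduces, using assumed names rather than the actual SPDK structures: reservations fail while the cache has wrapped and its metadata header is being rewritten, and the header-write completion re-opens the cache.

/*
 * Minimal sketch (assumed names, not the real SPDK code): the cache hands out
 * block addresses until it wraps, then refuses new reservations until the
 * metadata header has been persisted again.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct nv_cache_model {
	pthread_spinlock_t	lock;
	uint64_t		num_available;	/* free data blocks */
	uint64_t		current_addr;	/* next block to hand out */
	uint64_t		data_offset;	/* first data block; block 0 holds the header */
	uint64_t		num_blocks;	/* total blocks on the cache bdev */
	bool			ready;		/* false while the header is being rewritten */
};

/* Try to reserve num_lbks blocks; returns 0 (the header block) when full or not ready. */
uint64_t
nv_cache_reserve(struct nv_cache_model *c, size_t num_lbks)
{
	uint64_t addr = 0;

	pthread_spin_lock(&c->lock);
	if (c->num_available < num_lbks || !c->ready) {
		pthread_spin_unlock(&c->lock);
		return 0;
	}

	addr = c->current_addr;
	c->current_addr += num_lbks;
	c->num_available -= num_lbks;

	if (c->current_addr == c->num_blocks) {
		/* Wrapped around: block further writes until the header is persisted. */
		c->current_addr = c->data_offset;
		c->ready = false;
		/* ...schedule the asynchronous header write here... */
	}
	pthread_spin_unlock(&c->lock);

	return addr;
}

/* Completion callback for the header write: re-enable reservations. */
void
nv_cache_header_written(struct nv_cache_model *c)
{
	pthread_spin_lock(&c->lock);
	c->ready = true;
	pthread_spin_unlock(&c->lock);
}
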
@@ -39,6 +39,7 @@
 #include "spdk/string.h"
 #include "spdk_internal/log.h"
 #include "spdk/ftl.h"
+#include "spdk/crc32.h"
 
 #include "ftl_core.h"
 #include "ftl_band.h"
@@ -944,6 +945,52 @@ ftl_process_flush(struct spdk_ftl_dev *dev, struct ftl_rwb_batch *batch)
 	}
 }
 
+static void
+ftl_nv_cache_wrap_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+	struct ftl_nv_cache *nv_cache = cb_arg;
+
+	if (!success) {
+		SPDK_ERRLOG("Unable to write non-volatile cache metadata header\n");
+		/* TODO: go into read-only mode */
+		assert(0);
+	}
+
+	pthread_spin_lock(&nv_cache->lock);
+	nv_cache->ready = true;
+	pthread_spin_unlock(&nv_cache->lock);
+
+	spdk_bdev_free_io(bdev_io);
+}
+
+static void
+ftl_nv_cache_wrap(void *ctx)
+{
+	struct spdk_ftl_dev *dev = ctx;
+	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
+	struct ftl_nv_cache_header *hdr = nv_cache->dma_buf;
+	struct ftl_io_channel *ioch;
+	struct spdk_bdev *bdev;
+	int rc;
+
+	ioch = spdk_io_channel_get_ctx(dev->ioch);
+	bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
+
+	hdr->uuid = dev->uuid;
+	hdr->size = spdk_bdev_get_num_blocks(bdev);
+	hdr->version = FTL_NV_CACHE_HEADER_VERSION;
+	hdr->checksum = spdk_crc32c_update(hdr, offsetof(struct ftl_nv_cache_header, checksum), 0);
+
+	rc = spdk_bdev_write_blocks(nv_cache->bdev_desc, ioch->cache_ioch, hdr, 0, 1,
+				    ftl_nv_cache_wrap_cb, nv_cache);
+	if (spdk_unlikely(rc != 0)) {
+		SPDK_ERRLOG("Unable to write non-volatile cache metadata header: %s\n",
+			    spdk_strerror(-rc));
+		/* TODO: go into read-only mode */
+		assert(0);
+	}
+}
+
 static uint64_t
 ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_lbks)
 {
@@ -954,7 +1001,7 @@ ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_lbks)
 	cache_size = spdk_bdev_get_num_blocks(bdev);
 
 	pthread_spin_lock(&nv_cache->lock);
-	if (spdk_unlikely(nv_cache->num_available == 0)) {
+	if (spdk_unlikely(nv_cache->num_available == 0 || !nv_cache->ready)) {
 		goto out;
 	}
 
@@ -973,6 +1020,8 @@ ftl_reserve_nv_cache(struct ftl_nv_cache *nv_cache, size_t *num_lbks)
 
 	if (nv_cache->current_addr == spdk_bdev_get_num_blocks(bdev)) {
 		nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
+		nv_cache->ready = false;
+		spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_nv_cache_wrap, dev);
 	}
 out:
 	pthread_spin_unlock(&nv_cache->lock);
@@ -112,8 +112,12 @@ struct ftl_nv_cache {
 	uint64_t			num_available;
 	/* Maximum number of blocks */
 	uint64_t			num_data_blocks;
+	/* Indicates that the data can be written to the cache */
+	bool				ready;
 	/* Metadata pool */
 	struct spdk_mempool		*md_pool;
+	/* DMA buffer for writing the header */
+	void				*dma_buf;
 	/* Cache lock */
 	pthread_spinlock_t		lock;
 };
@@ -557,6 +557,12 @@ ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc
 		return -1;
 	}
 
+	nv_cache->dma_buf = spdk_dma_zmalloc(FTL_BLOCK_SIZE, spdk_bdev_get_buf_align(bdev), NULL);
+	if (!nv_cache->dma_buf) {
+		SPDK_ERRLOG("Memory allocation failure\n");
+		return -1;
+	}
+
 	if (pthread_spin_init(&nv_cache->lock, PTHREAD_PROCESS_PRIVATE)) {
 		SPDK_ERRLOG("Failed to initialize cache lock\n");
 		return -1;
@@ -566,6 +572,7 @@ ftl_dev_init_nv_cache(struct spdk_ftl_dev *dev, struct spdk_bdev_desc *bdev_desc
 	nv_cache->current_addr = FTL_NV_CACHE_DATA_OFFSET;
 	nv_cache->num_data_blocks = spdk_bdev_get_num_blocks(bdev) - 1;
 	nv_cache->num_available = nv_cache->num_data_blocks;
+	nv_cache->ready = false;
 
 	return 0;
 }
@@ -867,6 +874,7 @@ ftl_write_nv_cache_md_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_ar
 		return;
 	}
 
+	dev->nv_cache.ready = true;
 	ftl_init_complete(dev);
 }
 
@@ -1236,6 +1244,8 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
 		}
 	}
 
+	spdk_dma_free(dev->nv_cache.dma_buf);
+
 	spdk_mempool_free(dev->lba_pool);
 	spdk_mempool_free(dev->nv_cache.md_pool);
 	if (dev->lba_request_pool) {
@@ -421,6 +421,10 @@ ftl_nv_cache_header_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 		rc = -ENOTRECOVERABLE;
 		goto out;
 	}
+
+	pthread_spin_lock(&nv_cache->lock);
+	nv_cache->ready = true;
+	pthread_spin_unlock(&nv_cache->lock);
 out:
 	ftl_restore_complete(restore, rc);
 	spdk_bdev_free_io(bdev_io);