lib/ftl: initialize per-ioch write buffers
This patch introduces the per-io_channel write buffer entries, the structures
necessary for their management, and performs their initialization. They will be
utilized by the upcoming patches.

Change-Id: I772caa445d1887166b9bcd7679d32d2ed2461ef3
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/903
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
commit 0674194ae8 (parent bfafd4e472)
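The free and submit rings added here give each channel a single-producer/single-consumer hand-off for its write buffer entries. A minimal sketch of how the upcoming patches can be expected to use them - the helper names and the write-path flow below are illustrative assumptions, not part of this patch:

/* Sketch only; assumes <string.h> for memcpy, spdk/env.h for the ring helpers,
 * and the ftl_wbuf_entry/ftl_io_channel definitions added further down.
 */
static struct ftl_wbuf_entry *
ftl_acquire_wbuf_entry(struct ftl_io_channel *ioch)
{
	struct ftl_wbuf_entry *entry;

	if (spdk_ring_dequeue(ioch->free_queue, (void **)&entry, 1) != 1) {
		/* Every entry is in flight - the caller needs to retry later */
		return NULL;
	}

	return entry;
}

/* Sketch only: fill the entry's single-block payload and hand it over to the
 * write path through the submit queue.
 */
static void
ftl_submit_wbuf_entry(struct ftl_wbuf_entry *entry, uint64_t lba, const void *buf)
{
	entry->lba = lba;
	memcpy(entry->payload, buf, FTL_BLOCK_SIZE);

	spdk_ring_enqueue(entry->ioch->submit_queue, (void **)&entry, 1, NULL);
}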
@@ -967,6 +967,77 @@ ftl_io_channel_register(void *ctx)
 	TAILQ_INSERT_TAIL(&dev->ioch_queue, ioch, tailq);
 }
 
+static int
+ftl_io_channel_init_wbuf(struct ftl_io_channel *ioch)
+{
+	struct spdk_ftl_dev *dev = ioch->dev;
+	struct ftl_wbuf_entry *entry;
+	uint32_t i;
+	int rc;
+
+	ioch->num_entries = dev->conf.rwb_size / FTL_BLOCK_SIZE;
+	ioch->wbuf_entries = calloc(ioch->num_entries, sizeof(*ioch->wbuf_entries));
+	if (ioch->wbuf_entries == NULL) {
+		SPDK_ERRLOG("Failed to allocate write buffer entry array\n");
+		return -1;
+	}
+
+	ioch->wbuf_payload = spdk_zmalloc(dev->conf.rwb_size, FTL_BLOCK_SIZE, NULL,
+					  SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+	if (ioch->wbuf_payload == NULL) {
+		SPDK_ERRLOG("Failed to allocate write buffer payload\n");
+		goto error_entries;
+	}
+
+	ioch->free_queue = spdk_ring_create(SPDK_RING_TYPE_SP_SC,
+					    spdk_align32pow2(ioch->num_entries + 1),
+					    SPDK_ENV_SOCKET_ID_ANY);
+	if (ioch->free_queue == NULL) {
+		SPDK_ERRLOG("Failed to allocate free queue\n");
+		goto error_payload;
+	}
+
+	ioch->submit_queue = spdk_ring_create(SPDK_RING_TYPE_SP_SC,
+					      spdk_align32pow2(ioch->num_entries + 1),
+					      SPDK_ENV_SOCKET_ID_ANY);
+	if (ioch->submit_queue == NULL) {
+		SPDK_ERRLOG("Failed to allocate submit queue\n");
+		goto error_free_queue;
+	}
+
+	for (i = 0; i < ioch->num_entries; ++i) {
+		entry = &ioch->wbuf_entries[i];
+		entry->payload = (char *)ioch->wbuf_payload + i * FTL_BLOCK_SIZE;
+		entry->ioch = ioch;
+		entry->index = i;
+		entry->addr.offset = FTL_ADDR_INVALID;
+
+		rc = pthread_spin_init(&entry->lock, PTHREAD_PROCESS_PRIVATE);
+		if (rc != 0) {
+			SPDK_ERRLOG("Failed to initialize spinlock\n");
+			goto error_spinlock;
+		}
+
+		spdk_ring_enqueue(ioch->free_queue, (void **)&entry, 1, NULL);
+	}
+
+	return 0;
+error_spinlock:
+	for (; i > 0; --i) {
+		pthread_spin_destroy(&ioch->wbuf_entries[i - 1].lock);
+	}
+
+	spdk_ring_free(ioch->submit_queue);
+error_free_queue:
+	spdk_ring_free(ioch->free_queue);
+error_payload:
+	spdk_free(ioch->wbuf_payload);
+error_entries:
+	free(ioch->wbuf_entries);
+
+	return -1;
+}
+
 static int
 ftl_io_channel_create_cb(void *io_device, void *ctx)
 {
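One note on the ring sizing above: spdk_ring sizes have to be a power of two and, as far as I understand the rte_ring-backed implementation, one slot always stays unused, which is presumably why both rings are created with spdk_align32pow2(num_entries + 1) slots rather than num_entries. Worked numbers, assuming (purely for illustration, not the driver's defaults) an rwb_size of 1 MiB and a 4 KiB FTL_BLOCK_SIZE:

	uint32_t num_entries = (1024 * 1024) / 4096;               /* 256 entries            */
	uint32_t ring_size = spdk_align32pow2(num_entries + 1);    /* align32pow2(257) = 512 */
	/* A 512-slot ring leaves at least 511 usable slots - room for all 256 entries. */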
@@ -1033,12 +1104,18 @@ ftl_io_channel_create_cb(void *io_device, void *ctx)
 		goto fail_poller;
 	}
 
+	if (ftl_io_channel_init_wbuf(ioch)) {
+		SPDK_ERRLOG("Failed to initialize IO channel's write buffer\n");
+		goto fail_wbuf;
+	}
+
 	_ioch->ioch = ioch;
 
 	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_io_channel_register, ioch);
 
 	return 0;
 
+fail_wbuf:
+	spdk_poller_unregister(&ioch->poller);
 fail_poller:
 	if (ioch->cache_ioch) {
 		spdk_put_io_channel(ioch->cache_ioch);
@@ -1050,7 +1127,6 @@ fail_ioch:
 	free(ioch);
 
 	return -1;
-
 }
 
 static void
@@ -1058,7 +1134,7 @@ ftl_io_channel_unregister(void *ctx)
 {
 	struct ftl_io_channel *ioch = ctx;
 	struct spdk_ftl_dev *dev = ioch->dev;
-	uint32_t num_io_channels __attribute__((unused));
+	uint32_t i, num_io_channels __attribute__((unused));
 
 	assert(ioch->index < dev->conf.max_io_channels);
 	assert(dev->ioch_array[ioch->index] == ioch);
@@ -1069,17 +1145,30 @@ ftl_io_channel_unregister(void *ctx)
 	num_io_channels = __atomic_fetch_sub(&dev->num_io_channels, 1, __ATOMIC_SEQ_CST);
 	assert(num_io_channels > 0);
 
+	for (i = 0; i < ioch->num_entries; ++i) {
+		pthread_spin_destroy(&ioch->wbuf_entries[i].lock);
+	}
+
 	spdk_mempool_free(ioch->io_pool);
+	spdk_ring_free(ioch->free_queue);
+	spdk_ring_free(ioch->submit_queue);
+	spdk_free(ioch->wbuf_payload);
+	free(ioch->wbuf_entries);
 	free(ioch);
 }
 
 static void
-ftl_io_channel_destroy_cb(void *io_device, void *ctx)
+_ftl_io_channel_destroy_cb(void *ctx)
 {
-	struct _ftl_io_channel *_ioch = ctx;
-	struct ftl_io_channel *ioch = _ioch->ioch;
+	struct ftl_io_channel *ioch = ctx;
 	struct spdk_ftl_dev *dev = ioch->dev;
 
+	/* Do not destroy the channel if some of its entries are still in use */
+	if (spdk_ring_count(ioch->free_queue) != ioch->num_entries) {
+		spdk_thread_send_msg(spdk_get_thread(), _ftl_io_channel_destroy_cb, ctx);
+		return;
+	}
+
 	spdk_poller_unregister(&ioch->poller);
 
 	spdk_put_io_channel(ioch->base_ioch);
@@ -1093,6 +1182,15 @@ ftl_io_channel_destroy_cb(void *io_device, void *ctx)
 	spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_io_channel_unregister, ioch);
 }
 
+static void
+ftl_io_channel_destroy_cb(void *io_device, void *ctx)
+{
+	struct _ftl_io_channel *_ioch = ctx;
+	struct ftl_io_channel *ioch = _ioch->ioch;
+
+	_ftl_io_channel_destroy_cb(ioch);
+}
+
 static int
 ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
 {
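The destroy path is split into ftl_io_channel_destroy_cb() and _ftl_io_channel_destroy_cb() because the two callbacks take different arguments: the io_device destroy callback receives the _ftl_io_channel wrapper, while a thread message callback only gets a single context pointer. Keeping the inner function message-shaped is what lets it re-send itself to the same thread until every write buffer entry has drained back into the free queue. For reference, the relevant SPDK typedefs (quoted from memory, so verify against spdk/thread.h):

	typedef void (*spdk_msg_fn)(void *ctx);
	typedef void (*spdk_io_channel_destroy_cb)(void *io_device, void *ctx_buf);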
@@ -124,6 +124,38 @@ struct ftl_io_init_opts {
 	void *cb_ctx;
 };
 
+struct ftl_io_channel;
+
+struct ftl_wbuf_entry {
+	/* IO channel that owns the write bufer entry */
+	struct ftl_io_channel *ioch;
+	/* Data payload (single block) */
+	void *payload;
+	/* Index within the IO channel's wbuf_entries array */
+	uint32_t index;
+	uint32_t io_flags;
+	/* Points at the band the data is copied from. Only valid for internal
+	 * requests coming from reloc.
+	 */
+	struct ftl_band *band;
+	/* Physical address of that particular block. Valid once the data has
+	 * been written out.
+	 */
+	struct ftl_addr addr;
+	/* Logical block address */
+	uint64_t lba;
+
+	/* Trace ID of the requests the entry is part of */
+	uint64_t trace;
+
+	/* Indicates that the entry was written out and is still present in the
+	 * L2P table.
+	 */
+	bool valid;
+	/* Lock that protects the entry from being evicted from the L2P */
+	pthread_spinlock_t lock;
+};
+
 struct ftl_io_channel {
 	/* Device */
 	struct spdk_ftl_dev *dev;
@@ -143,6 +175,16 @@ struct ftl_io_channel {
 	TAILQ_HEAD(, ftl_io) write_cmpl_queue;
 	TAILQ_HEAD(, ftl_io) retry_queue;
 	TAILQ_ENTRY(ftl_io_channel) tailq;
+
+	/* Array of write buffer entries */
+	struct ftl_wbuf_entry *wbuf_entries;
+	/* Write buffer data payload */
+	void *wbuf_payload;
+	/* Number of write buffer entries */
+	uint32_t num_entries;
+	/* Write buffer queues */
+	struct spdk_ring *free_queue;
+	struct spdk_ring *submit_queue;
 };
 
 /* General IO descriptor */