diff --git a/lib/ftl/ftl_core.c b/lib/ftl/ftl_core.c
index 3c9de5ba3..7137f3c2f 100644
--- a/lib/ftl/ftl_core.c
+++ b/lib/ftl/ftl_core.c
@@ -219,6 +219,12 @@ ftl_get_next_batch(struct spdk_ftl_dev *dev)
 	uint64_t *metadata;
 
 	if (batch == NULL) {
+		batch = TAILQ_FIRST(&dev->pending_batches);
+		if (batch != NULL) {
+			TAILQ_REMOVE(&dev->pending_batches, batch, tailq);
+			return batch;
+		}
+
 		batch = TAILQ_FIRST(&dev->free_batches);
 		if (spdk_unlikely(batch == NULL)) {
 			return NULL;
diff --git a/lib/ftl/ftl_core.h b/lib/ftl/ftl_core.h
index e02689dae..41cc0bcc3 100644
--- a/lib/ftl/ftl_core.h
+++ b/lib/ftl/ftl_core.h
@@ -234,6 +234,10 @@ struct spdk_ftl_dev {
 	struct iovec			*iov_buf;
 	/* Batch currently being filled */
 	struct ftl_batch		*current_batch;
+	/* Batches that are full and ready to be sent. A batch is put on this
+	 * queue when it has already been filled but cannot be sent right away.
+	 */
+	TAILQ_HEAD(, ftl_batch)		pending_batches;
 	TAILQ_HEAD(, ftl_batch)		free_batches;
 
 	/* Devices' list */
diff --git a/lib/ftl/ftl_init.c b/lib/ftl/ftl_init.c
index e5e9237eb..c6407f9fb 100644
--- a/lib/ftl/ftl_init.c
+++ b/lib/ftl/ftl_init.c
@@ -1225,6 +1225,7 @@ ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
 	}
 
 	TAILQ_INIT(&dev->free_batches);
+	TAILQ_INIT(&dev->pending_batches);
 	TAILQ_INIT(&dev->ioch_queue);
 
 	for (i = 0; i < FTL_BATCH_COUNT; ++i) {
diff --git a/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c b/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
index b5a3847b7..c9623ff24 100644
--- a/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
+++ b/test/unit/lib/ftl/ftl_io.c/ftl_io_ut.c
@@ -807,7 +807,7 @@ test_submit_batch(void)
 	struct spdk_io_channel **_ioch_array;
 	struct ftl_io_channel **ioch_array;
 	struct ftl_wbuf_entry *entry;
-	struct ftl_batch *batch;
+	struct ftl_batch *batch, *batch2;
 	uint32_t num_io_channels = 16;
 	uint32_t ioch_idx, tmp_idx, entry_idx;
 	uint64_t ioch_bitmap;
@@ -906,6 +906,41 @@ test_submit_batch(void)
 			  ioch_array[ioch_idx]->num_entries);
 	}
 
+	/* Make sure pending batches are prioritized */
+	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
+		set_thread(ioch_idx);
+
+		while (spdk_ring_count(ioch_array[ioch_idx]->submit_queue) < dev->xfer_size) {
+			entry = ftl_acquire_wbuf_entry(ioch_array[ioch_idx], 0);
+			SPDK_CU_ASSERT_FATAL(entry != NULL);
+			num_entries = spdk_ring_enqueue(ioch_array[ioch_idx]->submit_queue,
+							(void **)&entry, 1, NULL);
+			CU_ASSERT(num_entries == 1);
+		}
+	}
+
+	batch = ftl_get_next_batch(dev);
+	SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+	TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
+	batch2 = ftl_get_next_batch(dev);
+	SPDK_CU_ASSERT_FATAL(batch2 != NULL);
+
+	CU_ASSERT(TAILQ_EMPTY(&dev->pending_batches));
+	CU_ASSERT(batch == batch2);
+
+	batch = ftl_get_next_batch(dev);
+	SPDK_CU_ASSERT_FATAL(batch != NULL);
+
+	ftl_release_batch(dev, batch);
+	ftl_release_batch(dev, batch2);
+
+	for (ioch_idx = 2; ioch_idx < num_io_channels; ++ioch_idx) {
+		batch = ftl_get_next_batch(dev);
+		SPDK_CU_ASSERT_FATAL(batch != NULL);
+		ftl_release_batch(dev, batch);
+	}
+
 	for (ioch_idx = 0; ioch_idx < num_io_channels; ++ioch_idx) {
 		set_thread(ioch_idx);
 		spdk_put_io_channel(_ioch_array[ioch_idx]);
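
Note for reviewers (not part of the diff): the change above only covers the consumer side, where ftl_get_next_batch() drains pending_batches before pulling from free_batches. The producer side is the write path that parks a fully filled batch on pending_batches when it cannot be sent at that moment. A minimal sketch of that path is below; ftl_io_alloc_for_batch() and ftl_submit_write() are hypothetical stand-ins for the real SPDK FTL write-path helpers, shown only to illustrate the intended use of the queue.

/*
 * Sketch of the producer side of pending_batches (assumed, not from
 * this diff). ftl_io_alloc_for_batch() and ftl_submit_write() are
 * hypothetical names standing in for the actual write-path helpers.
 */
static void
ftl_process_writes_sketch(struct spdk_ftl_dev *dev)
{
	struct ftl_batch *batch;
	struct ftl_io *io;

	/* Hands out a parked batch first, otherwise fills a new one */
	batch = ftl_get_next_batch(dev);
	if (batch == NULL) {
		return;
	}

	io = ftl_io_alloc_for_batch(dev, batch);
	if (spdk_unlikely(io == NULL)) {
		/*
		 * The batch is already full, so it must not go back on
		 * free_batches; park it so the next ftl_get_next_batch()
		 * call returns it before touching the submit queues.
		 */
		TAILQ_INSERT_TAIL(&dev->pending_batches, batch, tailq);
		return;
	}

	ftl_submit_write(dev, io);
}

This also explains the ordering asserted in the unit test: after a batch is inserted into pending_batches, the very next ftl_get_next_batch() call must return that same batch (batch == batch2) and leave the queue empty, rather than assembling a new batch from the per-channel submit queues.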