From 310f324e3846950532387a92345022b3e3cc97a3 Mon Sep 17 00:00:00 2001
From: GangCao
Date: Thu, 28 Dec 2017 04:25:15 -0500
Subject: [PATCH] bdev/qos: add support to reset the QoS bdev channel

As there is no I/O channel associated with the QoS bdev channel, add
dedicated functions to handle the reset operation on the QoS bdev
channel. The reset operation is carried out on the QoS thread.

Related UT code exercising QoS with a reset operation is also included.

Change-Id: Ibba68ddb132fa926fec6327829157b43ac806713
Signed-off-by: GangCao
Reviewed-on: https://review.gerrithub.io/393181
Tested-by: SPDK Automated Test System
Reviewed-by: Daniel Verkamp
Reviewed-by: Jim Harris
Reviewed-by: Shuhei Matsumoto
---
 lib/bdev/bdev.c                        |  44 ++++++++++
 test/unit/lib/bdev/mt/bdev.c/bdev_ut.c | 114 +++++++++++++++++++++++++
 2 files changed, 158 insertions(+)

diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index e97beb9f5..de441f776 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -1881,6 +1881,27 @@ _spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
 	spdk_for_each_channel_continue(i, 0);
 }
 
+static void
+_spdk_bdev_reset_freeze_qos_channel(void *ctx)
+{
+	struct spdk_bdev		*bdev = ctx;
+	struct spdk_bdev_mgmt_channel	*mgmt_channel = NULL;
+	struct spdk_bdev_channel	*qos_channel = bdev->qos_channel;
+	struct spdk_bdev_module_channel	*shared_ch = NULL;
+
+	if (qos_channel) {
+		shared_ch = qos_channel->module_ch;
+		mgmt_channel = spdk_io_channel_get_ctx(qos_channel->mgmt_channel);
+
+		qos_channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
+
+		_spdk_bdev_abort_queued_io(&shared_ch->nomem_io, qos_channel);
+		_spdk_bdev_abort_queued_io(&qos_channel->qos_io, qos_channel);
+		_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, qos_channel);
+		_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, qos_channel);
+	}
+}
+
 static void
 _spdk_bdev_start_reset(void *ctx)
 {
@@ -1938,6 +1959,12 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
 
 	_spdk_bdev_channel_start_reset(channel);
 
+	/* Explicitly handle the QoS bdev channel, which has no I/O channel associated with it */
+	if (bdev->qos_thread) {
+		spdk_thread_send_msg(bdev->qos_thread,
+				     _spdk_bdev_reset_freeze_qos_channel, bdev);
+	}
+
 	return 0;
 }
 
@@ -2149,6 +2176,17 @@ _spdk_bdev_io_complete(void *ctx)
 			     bdev_io->caller_ctx);
 }
 
+static void
+_spdk_bdev_unfreeze_qos_channel(void *ctx)
+{
+	struct spdk_bdev	*bdev = ctx;
+
+	if (bdev->qos_channel) {
+		bdev->qos_channel->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
+		assert(TAILQ_EMPTY(&bdev->qos_channel->queued_resets));
+	}
+}
+
 static void
 _spdk_bdev_reset_complete(struct spdk_io_channel_iter *i, int status)
 {
@@ -2199,6 +2237,12 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
 	pthread_mutex_unlock(&bdev->mutex);
 
 	if (unlock_channels) {
+		/* Explicitly handle the QoS bdev channel, which has no I/O channel associated with it */
+		if (bdev->qos_thread) {
+			spdk_thread_send_msg(bdev->qos_thread,
+					     _spdk_bdev_unfreeze_qos_channel, bdev);
+		}
+
 		spdk_for_each_channel(__bdev_to_io_dev(bdev), _spdk_bdev_unfreeze_channel,
 				      bdev_io, _spdk_bdev_reset_complete);
 		return;
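The lib/bdev/bdev.c changes above rely on SPDK's cross-thread messaging: the QoS bdev channel is owned by a dedicated QoS thread and is not a registered I/O channel, so spdk_for_each_channel() never reaches it, and the reset path instead hands the freeze/unfreeze work to that thread with spdk_thread_send_msg(). Below is a minimal sketch of that pattern; only spdk_thread_send_msg() is the real SPDK call (the header name follows current SPDK), and the struct, macro, and function names are illustrative rather than taken from this patch.

#include <stdint.h>
#include "spdk/thread.h"	/* assumed header name per current SPDK */

/* Stand-in for per-channel state owned by the QoS thread. */
struct example_qos_state {
	uint32_t	flags;
};

#define EXAMPLE_RESET_IN_PROGRESS	0x1

/* Runs on the QoS thread, so the state can be modified without locking. */
static void
example_freeze_on_qos_thread(void *ctx)
{
	struct example_qos_state *qos = ctx;

	qos->flags |= EXAMPLE_RESET_IN_PROGRESS;
}

/* Called from the thread that starts the reset: defer the freeze to the
 * thread that owns the QoS state instead of touching it directly. */
static void
example_start_reset(struct spdk_thread *qos_thread, struct example_qos_state *qos)
{
	spdk_thread_send_msg(qos_thread, example_freeze_on_qos_thread, qos);
}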
diff --git a/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
index e4dcf4bc7..b9564c2f1 100644
--- a/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
+++ b/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -919,6 +919,119 @@ io_during_qos_queue(void)
 	teardown_test();
 }
 
+static void
+io_during_qos_reset(void)
+{
+	struct spdk_io_channel *io_ch[3];
+	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
+	struct spdk_bdev *bdev;
+	enum spdk_bdev_io_status status0, status1, status_reset;
+	struct spdk_bdev_module_channel *module_ch;
+	int rc;
+
+	setup_test();
+
+	/*
+	 * First test the normal case: submit an I/O on each of two channels (QoS disabled
+	 * and no reset in progress) and verify that both complete successfully.
+	 */
+	set_thread(0);
+	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+	status0 = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(bdev_ch[0]->flags == 0);
+
+	set_thread(1);
+	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+	status1 = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(bdev_ch[1]->flags == 0);
+
+	poll_threads();
+	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+
+	set_thread(0);
+	stub_complete_io(g_bdev.io_target, 0);
+	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	set_thread(1);
+	stub_complete_io(g_bdev.io_target, 0);
+	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/*
+	 * Enable QoS on the bdev.
+	 */
+	set_thread(2);
+	bdev = bdev_ch[0]->bdev;
+	bdev->ios_per_sec = 2000;
+	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
+	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
+	qos_bdev_ch = bdev->qos_channel;
+	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
+	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
+	module_ch = qos_bdev_ch->module_ch;
+	CU_ASSERT(module_ch != NULL);
+
+	/*
+	 * Now submit a reset, and leave it pending while we submit I/O on two different
+	 * channels. These I/Os should be failed by the bdev layer since the reset is in
+	 * progress.
+	 */
+	set_thread(0);
+	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
+	CU_ASSERT(rc == 0);
+
+	CU_ASSERT(bdev_ch[0]->flags == 0);
+	CU_ASSERT(bdev_ch[1]->flags == 0);
+	CU_ASSERT(bdev_ch[2]->flags == 0);
+	CU_ASSERT(qos_bdev_ch->flags & BDEV_CH_QOS_ENABLED);
+	poll_threads();
+	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
+	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
+	CU_ASSERT(bdev_ch[2]->flags == BDEV_CH_RESET_IN_PROGRESS);
+	CU_ASSERT(qos_bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS);
+
+	set_thread(0);
+	status0 = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+	CU_ASSERT(rc == 0);
+
+	set_thread(1);
+	status1 = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+	CU_ASSERT(rc == 0);
+
+	/*
+	 * A reset is in progress, so these read I/Os should complete with failure even with
+	 * QoS enabled. Note that we need to poll_threads() since I/O completed inline have
+	 * their completions deferred.
+	 */
+	poll_threads();
+	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
+	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
+
+	set_thread(0);
+	stub_complete_io(g_bdev.io_target, 0);
+	spdk_put_io_channel(io_ch[0]);
+	set_thread(1);
+	stub_complete_io(g_bdev.io_target, 0);
+	spdk_put_io_channel(io_ch[1]);
+	set_thread(2);
+	stub_complete_io(g_bdev.io_target, 0);
+	spdk_put_io_channel(io_ch[2]);
+	poll_threads();
+	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	teardown_test();
+}
+
 static void
 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 {
@@ -1121,6 +1234,7 @@ main(int argc, char **argv)
 		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
 		CU_add_test(suite, "io_during_qos", io_during_qos) == NULL ||
 		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
+		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
 		CU_add_test(suite, "enomem", enomem) == NULL ||
 		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
 	) {
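The behavior the new unit test pins down is also what a bdev consumer sees through the public API: while a reset submitted with spdk_bdev_reset() is outstanding, reads on any channel of that bdev complete with failure rather than being queued behind the reset. A small sketch of a caller driving that sequence follows; spdk_bdev_reset(), spdk_bdev_read_blocks(), and spdk_bdev_free_io() are real SPDK calls, while the helper names and the static result flags are illustrative.

#include "spdk/bdev.h"

/* Shared completion callback: record the result and release the bdev_io. */
static void
example_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *ok = cb_arg;

	*ok = success;
	spdk_bdev_free_io(bdev_io);
}

/* Submit a reset, then a read on the same descriptor and channel.  While the
 * reset is outstanding, the read is expected to complete with success == false
 * in example_io_done() instead of being queued behind the reset. */
static int
example_reset_then_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf)
{
	/* static so the storage is still valid when the callbacks run later */
	static bool reset_ok, read_ok;
	int rc;

	rc = spdk_bdev_reset(desc, ch, example_io_done, &reset_ok);
	if (rc != 0) {
		return rc;
	}

	return spdk_bdev_read_blocks(desc, ch, buf, 0, 1, example_io_done, &read_ok);
}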