From 2a2b7296ee6b38eadd66331656e794b8dd186b1b Mon Sep 17 00:00:00 2001
From: Jim Harris
Date: Thu, 19 Dec 2019 08:37:04 -0700
Subject: [PATCH] bdev: do not allow overlapped locked ranges

We can't allow overlapped locked ranges - otherwise two different
channels could be deadlocked.

So add a pending_locked_ranges to the bdev. When we start a lock
operation, check if the new range overlaps one that's already locked.
If so, put it on the pending list.

When an unlock operation completes, we will check if any pending
ranges can now be locked.

Signed-off-by: Jim Harris
Change-Id: I2e3113216a195887b954533495ff200df14fadc1
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478537
Tested-by: SPDK CI Jenkins
Community-CI: Broadcom SPDK FC-NVMe CI
Reviewed-by: Ben Walker
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Paul Luse
---
 include/spdk/bdev_module.h          |   5 +
 lib/bdev/bdev.c                     |  47 ++++++++-
 test/unit/lib/bdev/bdev.c/bdev_ut.c | 142 +++++++++++++++++++++++++++-
 3 files changed, 191 insertions(+), 3 deletions(-)

diff --git a/include/spdk/bdev_module.h b/include/spdk/bdev_module.h
index d2757be23..8f84f0c08 100644
--- a/include/spdk/bdev_module.h
+++ b/include/spdk/bdev_module.h
@@ -433,6 +433,11 @@ struct spdk_bdev {
 
 		/** Currently locked ranges for this bdev. Used to populate new channels. */
 		lba_range_tailq_t locked_ranges;
+
+		/** Pending locked ranges for this bdev. These ranges are not currently
+		 * locked due to overlapping with another locked range.
+		 */
+		lba_range_tailq_t pending_locked_ranges;
 	} internal;
 };
 
diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index e7e0c1e3e..82d883e80 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -4633,6 +4633,7 @@ bdev_init(struct spdk_bdev *bdev)
 
 	TAILQ_INIT(&bdev->internal.open_descs);
 	TAILQ_INIT(&bdev->internal.locked_ranges);
+	TAILQ_INIT(&bdev->internal.pending_locked_ranges);
 
 	TAILQ_INIT(&bdev->aliases);
 
@@ -5869,6 +5870,19 @@ bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx
 			      bdev_lock_lba_range_cb);
 }
 
+static bool
+bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
+{
+	struct lba_range *r;
+
+	TAILQ_FOREACH(r, tailq, tailq) {
+		if (bdev_lba_range_overlapped(range, r)) {
+			return true;
+		}
+	}
+	return false;
+}
+
 int
 bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 		    uint64_t offset, uint64_t length,
@@ -5902,8 +5916,16 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 	ctx->cb_arg = cb_arg;
 
 	pthread_mutex_lock(&bdev->internal.mutex);
-	TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
-	bdev_lock_lba_range_ctx(bdev, ctx);
+	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
+		/* There is an active lock overlapping with this range.
+		 * Put it on the pending list until this range no
+		 * longer overlaps with another.
+		 */
+		TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
+	} else {
+		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
+		bdev_lock_lba_range_ctx(bdev, ctx);
+	}
 	pthread_mutex_unlock(&bdev->internal.mutex);
 	return 0;
 }
@@ -5912,6 +5934,27 @@ static void
 bdev_unlock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
 {
 	struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+	struct locked_lba_range_ctx *pending_ctx;
+	struct spdk_bdev_channel *ch = ctx->range.owner_ch;
+	struct spdk_bdev *bdev = ch->bdev;
+	struct lba_range *range, *tmp;
+
+	pthread_mutex_lock(&bdev->internal.mutex);
+	/* Check if there are any pending locked ranges that overlap with this range
+	 * that was just unlocked. If there are, check that it doesn't overlap with any
+	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
+	 * the lock process.
+	 */
+	TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
+		if (bdev_lba_range_overlapped(range, &ctx->range) &&
+		    !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
+			TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
+			pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
+			TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
+			bdev_lock_lba_range_ctx(bdev, pending_ctx);
+		}
+	}
+	pthread_mutex_unlock(&bdev->internal.mutex);
 
 	ctx->cb_fn(ctx->cb_arg, status);
 	free(ctx);
diff --git a/test/unit/lib/bdev/bdev.c/bdev_ut.c b/test/unit/lib/bdev/bdev.c/bdev_ut.c
index 06b66fad7..6191113ad 100644
--- a/test/unit/lib/bdev/bdev.c/bdev_ut.c
+++ b/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -2751,6 +2751,144 @@ lock_lba_range_with_io_outstanding(void)
 	poll_threads();
 }
 
+static void
+lock_lba_range_overlapped(void)
+{
+	struct spdk_bdev *bdev;
+	struct spdk_bdev_desc *desc = NULL;
+	struct spdk_io_channel *io_ch;
+	struct spdk_bdev_channel *channel;
+	struct lba_range *range;
+	int ctx1;
+	int rc;
+
+	spdk_bdev_initialize(bdev_init_cb, NULL);
+
+	bdev = allocate_bdev("bdev0");
+
+	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(desc != NULL);
+	io_ch = spdk_bdev_get_io_channel(desc);
+	CU_ASSERT(io_ch != NULL);
+	channel = spdk_io_channel_get_ctx(io_ch);
+
+	/* Lock range 20-29. */
+	g_lock_lba_range_done = false;
+	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_lock_lba_range_done == true);
+	range = TAILQ_FIRST(&channel->locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 20);
+	CU_ASSERT(range->length == 10);
+
+	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
+	 * 20-29.
+	 */
+	g_lock_lba_range_done = false;
+	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_lock_lba_range_done == false);
+	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 25);
+	CU_ASSERT(range->length == 15);
+
+	/* Unlock 20-29. This should result in range 25-39 now getting locked since it
+	 * no longer overlaps with an active lock.
+	 */
+	g_unlock_lba_range_done = false;
+	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_unlock_lba_range_done == true);
+	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+	range = TAILQ_FIRST(&channel->locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 25);
+	CU_ASSERT(range->length == 15);
+
+	/* Lock 40-59. This should immediately lock since it does not overlap with the
+	 * currently active 25-39 lock.
+	 */
+	g_lock_lba_range_done = false;
+	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_lock_lba_range_done == true);
+	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	range = TAILQ_NEXT(range, tailq);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 40);
+	CU_ASSERT(range->length == 20);
+
+	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
+	g_lock_lba_range_done = false;
+	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_lock_lba_range_done == false);
+	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 35);
+	CU_ASSERT(range->length == 10);
+
+	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
+	 * the 40-59 lock is still active.
+	 */
+	g_unlock_lba_range_done = false;
+	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_unlock_lba_range_done == true);
+	CU_ASSERT(g_lock_lba_range_done == false);
+	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 35);
+	CU_ASSERT(range->length == 10);
+
+	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
+	 * no longer any active overlapping locks.
+	 */
+	g_unlock_lba_range_done = false;
+	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_unlock_lba_range_done == true);
+	CU_ASSERT(g_lock_lba_range_done == true);
+	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
+	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
+	SPDK_CU_ASSERT_FATAL(range != NULL);
+	CU_ASSERT(range->offset == 35);
+	CU_ASSERT(range->length == 10);
+
+	/* Finally, unlock 35-44. */
+	g_unlock_lba_range_done = false;
+	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
+	CU_ASSERT(rc == 0);
+	poll_threads();
+
+	CU_ASSERT(g_unlock_lba_range_done == true);
+	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));
+
+	spdk_put_io_channel(io_ch);
+	spdk_bdev_close(desc);
+	free_bdev(bdev);
+	spdk_bdev_finish(bdev_fini_cb, NULL);
+	poll_threads();
+}
+
 int
 main(int argc, char **argv)
 {
@@ -2789,7 +2927,9 @@ main(int argc, char **argv)
 		CU_add_test(suite, "bdev_set_io_timeout", bdev_set_io_timeout) == NULL ||
 		CU_add_test(suite, "lba_range_overlap", lba_range_overlap) == NULL ||
 		CU_add_test(suite, "lock_lba_range_check_ranges", lock_lba_range_check_ranges) == NULL ||
-		CU_add_test(suite, "lock_lba_range_with_io_outstanding", lock_lba_range_with_io_outstanding) == NULL
+		CU_add_test(suite, "lock_lba_range_with_io_outstanding",
+			    lock_lba_range_with_io_outstanding) == NULL ||
+		CU_add_test(suite, "lock_lba_range_overlapped", lock_lba_range_overlapped) == NULL
 	) {
 		CU_cleanup_registry();
 		return CU_get_error();
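The pending/promote flow this patch adds is small enough to model outside of SPDK. The sketch below is an illustration only, not part of the patch: all names in it (sketch_range, range_lock, range_unlock, range_overlapped, range_overlaps_list, the g_locked/g_pending lists) are hypothetical, and it uses plain <sys/queue.h> lists in place of SPDK's per-channel machinery, mutex, and callbacks. It mirrors the same idea: a lock request that overlaps an active range is parked on a pending list, and completing an unlock promotes any pending range that no longer overlaps anything still locked.

/* Standalone sketch of the pending-range promotion logic (hypothetical names,
 * no SPDK dependencies). Build with: cc -o range_sketch range_sketch.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>

struct sketch_range {
	uint64_t offset;
	uint64_t length;
	TAILQ_ENTRY(sketch_range) link;
};

TAILQ_HEAD(range_list, sketch_range);

static struct range_list g_locked = TAILQ_HEAD_INITIALIZER(g_locked);
static struct range_list g_pending = TAILQ_HEAD_INITIALIZER(g_pending);

/* Two ranges overlap if each one starts before the other ends.
 * (The patch reuses an existing bdev_lba_range_overlapped() helper for this.)
 */
static bool
range_overlapped(const struct sketch_range *a, const struct sketch_range *b)
{
	return a->offset < b->offset + b->length &&
	       b->offset < a->offset + a->length;
}

static bool
range_overlaps_list(const struct sketch_range *r, struct range_list *list)
{
	struct sketch_range *it;

	TAILQ_FOREACH(it, list, link) {
		if (range_overlapped(r, it)) {
			return true;
		}
	}
	return false;
}

/* Lock immediately when possible; otherwise park the range on the pending list. */
static void
range_lock(struct sketch_range *r)
{
	if (range_overlaps_list(r, &g_locked)) {
		TAILQ_INSERT_TAIL(&g_pending, r, link);
	} else {
		TAILQ_INSERT_TAIL(&g_locked, r, link);
	}
}

/* Release a range, then promote every pending range that overlapped it and
 * no longer overlaps anything still locked.
 */
static void
range_unlock(struct sketch_range *r)
{
	struct sketch_range *p, *next;

	TAILQ_REMOVE(&g_locked, r, link);
	for (p = TAILQ_FIRST(&g_pending); p != NULL; p = next) {
		next = TAILQ_NEXT(p, link);
		if (range_overlapped(p, r) && !range_overlaps_list(p, &g_locked)) {
			TAILQ_REMOVE(&g_pending, p, link);
			TAILQ_INSERT_TAIL(&g_locked, p, link);
		}
	}
}

int
main(void)
{
	struct sketch_range a = { .offset = 20, .length = 10 };
	struct sketch_range b = { .offset = 25, .length = 15 };

	range_lock(&a);		/* 20-29 locks immediately */
	range_lock(&b);		/* 25-39 overlaps 20-29, so it is parked */
	printf("pending empty after second lock: %d\n", TAILQ_EMPTY(&g_pending));	/* 0 */
	range_unlock(&a);	/* 25-39 is promoted to the locked list */
	printf("pending empty after unlock: %d\n", TAILQ_EMPTY(&g_pending));		/* 1 */
	return 0;
}

The real code differs in that the promoted range still has to be propagated to every channel via spdk_for_each_channel() before the caller's lock callback fires, which is why the unit test polls the threads before checking g_lock_lba_range_done.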