bdev: inherit locked ranges for new channels

Keep a mutex-protected list of the active locked ranges
in the bdev itself.  This list is only accessed when a new
channel is created, so that the channel can be populated with
the currently locked ranges.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Id68311b46ad4983b6bc9b0e1a8664d121a7e9f8e

Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/477871
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Authored by Jim Harris on 2019-12-11 06:31:08 -07:00; committed by Tomasz Zawadzki
parent b90b7ce477
commit ebd1a4f76c
3 changed files with 96 additions and 11 deletions
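
As a quick orientation for reviewers, here is a minimal, self-contained sketch of the pattern this change introduces: the bdev keeps a mutex-protected list of locked ranges, and each newly created channel copies that list under the mutex before it can accept I/O. The names below (fake_bdev, fake_channel, channel_create) are illustrative stand-ins, not SPDK APIs; the real change is in the hunks that follow.

/*
 * Illustrative sketch only (plain pthreads + <sys/queue.h>), not SPDK code.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct lba_range {
	uint64_t offset;
	uint64_t length;
	TAILQ_ENTRY(lba_range) tailq;
};
TAILQ_HEAD(lba_range_tailq, lba_range);

struct fake_bdev {
	pthread_mutex_t mutex;                 /* protects locked_ranges */
	struct lba_range_tailq locked_ranges;  /* ranges currently locked on the bdev */
};

struct fake_channel {
	struct lba_range_tailq locked_ranges;  /* per-channel copy, touched only by its thread */
};

/* Mirrors what bdev_channel_create() does in this commit: walk the bdev's
 * list under the mutex and duplicate each range into the new channel. */
static int
channel_create(struct fake_bdev *bdev, struct fake_channel *ch)
{
	struct lba_range *range, *copy;

	TAILQ_INIT(&ch->locked_ranges);
	pthread_mutex_lock(&bdev->mutex);
	TAILQ_FOREACH(range, &bdev->locked_ranges, tailq) {
		copy = calloc(1, sizeof(*copy));
		if (copy == NULL) {
			pthread_mutex_unlock(&bdev->mutex);
			return -1;
		}
		copy->offset = range->offset;
		copy->length = range->length;
		TAILQ_INSERT_TAIL(&ch->locked_ranges, copy, tailq);
	}
	pthread_mutex_unlock(&bdev->mutex);
	return 0;
}

Ranges locked after this copy still reach the channel through the existing for_each_channel message path; the duplicate check added to bdev_lock_lba_range_get_channel() below covers the window where a channel is created while that message is in flight.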


@@ -244,6 +244,7 @@ struct spdk_bdev_alias {
typedef TAILQ_HEAD(, spdk_bdev_io) bdev_io_tailq_t;
typedef STAILQ_HEAD(, spdk_bdev_io) bdev_io_stailq_t;
typedef TAILQ_HEAD(, lba_range) lba_range_tailq_t;
struct spdk_bdev {
/** User context passed in by the backend */
@@ -429,6 +430,9 @@ struct spdk_bdev {
/** histogram enabled on this bdev */
bool histogram_enabled;
bool histogram_in_progress;
/** Currently locked ranges for this bdev. Used to populate new channels. */
lba_range_tailq_t locked_ranges;
} internal;
};


@@ -134,8 +134,6 @@ struct lba_range {
TAILQ_ENTRY(lba_range) tailq;
};
typedef TAILQ_HEAD(, lba_range) lba_range_tailq_t;
static struct spdk_bdev_opts g_bdev_opts = {
.bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
.bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
@@ -2375,6 +2373,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
struct spdk_io_channel *mgmt_io_ch;
struct spdk_bdev_mgmt_channel *mgmt_ch;
struct spdk_bdev_shared_resource *shared_resource;
struct lba_range *range;
ch->bdev = bdev;
ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
@@ -2452,6 +2451,21 @@ bdev_channel_create(void *io_device, void *ctx_buf)
pthread_mutex_lock(&bdev->internal.mutex);
bdev_enable_qos(bdev, ch);
TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
struct lba_range *new_range;
new_range = calloc(1, sizeof(*new_range));
if (new_range == NULL) {
pthread_mutex_unlock(&bdev->internal.mutex);
bdev_channel_destroy_resource(ch);
return -1;
}
new_range->length = range->length;
new_range->offset = range->offset;
TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
}
pthread_mutex_unlock(&bdev->internal.mutex);
return 0;
@@ -4618,6 +4632,7 @@ bdev_init(struct spdk_bdev *bdev)
}
TAILQ_INIT(&bdev->internal.open_descs);
TAILQ_INIT(&bdev->internal.locked_ranges);
TAILQ_INIT(&bdev->aliases);
@@ -5769,7 +5784,11 @@ bdev_lock_lba_range_cb(struct spdk_io_channel_iter *i, int status)
*/
ctx->owner_range->owner_ch = ctx->range.owner_ch;
ctx->cb_fn(ctx->cb_arg, status);
free(ctx);
/* Don't free the ctx here. Its range is in the bdev's global list of
* locked ranges still, and will be removed and freed when this range
* is later unlocked.
*/
}
static int
@@ -5807,6 +5826,20 @@ bdev_lock_lba_range_get_channel(struct spdk_io_channel_iter *i)
struct locked_lba_range_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
struct lba_range *range;
TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
if (range->length == ctx->range.length && range->offset == ctx->range.offset) {
/* This range already exists on this channel, so don't add
* it again. This can happen when a new channel is created
* while the for_each_channel operation is in progress.
* Do not check for outstanding I/O in that case, since the
* range was locked before any I/O could be submitted to the
* new channel.
*/
spdk_for_each_channel_continue(i, 0);
return;
}
}
range = calloc(1, sizeof(*range));
if (range == NULL) {
spdk_for_each_channel_continue(i, -ENOMEM);
@@ -5868,7 +5901,10 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
ctx->cb_fn = cb_fn;
ctx->cb_arg = cb_arg;
pthread_mutex_lock(&bdev->internal.mutex);
TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
bdev_lock_lba_range_ctx(bdev, ctx);
pthread_mutex_unlock(&bdev->internal.mutex);
return 0;
}
@@ -5956,15 +5992,28 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
return -EINVAL;
}
ctx = calloc(1, sizeof(*ctx));
if (ctx == NULL) {
return -ENOMEM;
}
pthread_mutex_lock(&bdev->internal.mutex);
/* We confirmed that this channel has locked the specified range. To
 * start the unlock process, we find the range in the bdev's locked_ranges
* and remove it. This ensures new channels don't inherit the locked range.
* Then we will send a message to each channel (including the one specified
* here) to remove the range from its per-channel list.
*/
TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
if (range->offset == offset && range->length == length &&
range->locked_ctx == cb_arg) {
break;
}
}
if (range == NULL) {
assert(false);
pthread_mutex_unlock(&bdev->internal.mutex);
return -EINVAL;
}
TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
pthread_mutex_unlock(&bdev->internal.mutex);
ctx->range.offset = offset;
ctx->range.length = length;
ctx->range.owner_ch = ch;
ctx->range.locked_ctx = cb_arg;
ctx->cb_fn = cb_fn;
ctx->cb_arg = cb_arg;

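To make the unlock-path change above easier to follow: bdev_unlock_lba_range() no longer allocates a new context because the lba_range placed on bdev->internal.locked_ranges is embedded inside the lock context, so the context can be recovered from the list entry. A hedged sketch of that recovery, with simplified stand-in structs and a plain offsetof-based equivalent of SPDK_CONTAINEROF:

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Simplified stand-ins; field names follow the diff above, not the full
 * SPDK definitions. */
struct lba_range {
	uint64_t offset;
	uint64_t length;
	TAILQ_ENTRY(lba_range) tailq;
};

struct locked_lba_range_ctx {
	struct lba_range range;   /* embedded entry linked on the bdev's locked_ranges */
	void (*cb_fn)(void *cb_arg, int status);
	void *cb_arg;
};

/* offsetof-based equivalent of SPDK_CONTAINEROF, for this sketch only. */
#define CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct locked_lba_range_ctx *
ctx_from_range(struct lba_range *range)
{
	/* range is the first member, so this is effectively a cast, but the
	 * containerof form stays correct even if the layout changes. */
	return CONTAINER_OF(range, struct locked_lba_range_ctx, range);
}

Keeping the embedded range on the bdev's list for the lifetime of the lock is also why bdev_lock_lba_range_cb() above no longer frees the context.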

@@ -1724,9 +1724,17 @@ bdev_set_io_timeout_mt(void)
free_threads();
}
static bool g_io_done2;
static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;
static void
io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
g_io_done2 = true;
spdk_bdev_free_io(bdev_io);
}
static void
lock_lba_range_done(void *ctx, int status)
{
@@ -1761,7 +1769,7 @@ lock_lba_range_then_submit_io(void)
struct spdk_bdev_channel *bdev_ch[3];
struct lba_range *range;
char buf[4096];
int ctx0, ctx1;
int ctx0, ctx1, ctx2;
int rc;
setup_test();
@ -1835,6 +1843,23 @@ lock_lba_range_then_submit_io(void)
rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
CU_ASSERT(rc == -EINVAL);
/* Now create a new channel and submit a write I/O with it. This should also be queued.
* The new channel should inherit the active locks from the bdev's internal list.
*/
set_thread(2);
io_ch[2] = spdk_bdev_get_io_channel(desc);
bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
CU_ASSERT(io_ch[2] != NULL);
g_io_done2 = false;
CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
CU_ASSERT(rc == 0);
poll_threads();
CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
CU_ASSERT(g_io_done2 == false);
set_thread(0);
rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
CU_ASSERT(rc == 0);
@ -1843,19 +1868,26 @@ lock_lba_range_then_submit_io(void)
/* The LBA range is unlocked, so the write IOs should now have started execution. */
CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
set_thread(1);
CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
stub_complete_io(io_target, 1);
set_thread(2);
CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
stub_complete_io(io_target, 1);
poll_threads();
CU_ASSERT(g_io_done == true);
CU_ASSERT(g_io_done2 == true);
/* Tear down the channels */
set_thread(0);
spdk_put_io_channel(io_ch[0]);
set_thread(1);
spdk_put_io_channel(io_ch[1]);
set_thread(2);
spdk_put_io_channel(io_ch[2]);
poll_threads();
set_thread(0);
teardown_test();