bdev: Add mutex and reference counter to block device descriptor

This patch adds a reference counter and a guarding mutex to the bdev_desc
structure to keep track of in-flight messages and to avoid releasing the
descriptor until all of its messages have been processed. With the
reference counter in place, the existing remove_scheduled field in the
descriptor structure is no longer needed.

Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I97f78955362b04131abf202ba04e6d60343f3faf
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469620
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Evgeniy Kochetov 2019-09-26 13:46:01 +00:00 committed by Jim Harris
parent 3fb65d7b9d
commit ba00bff251

View File

@ -1,8 +1,8 @@
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright (c) Intel Corporation. * Copyright (c) Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -283,9 +283,10 @@ struct spdk_bdev_desc {
}; };
void *ctx; void *ctx;
} callback; } callback;
bool remove_scheduled;
bool closed; bool closed;
bool write; bool write;
pthread_mutex_t mutex;
uint32_t refs;
TAILQ_ENTRY(spdk_bdev_desc) link; TAILQ_ENTRY(spdk_bdev_desc) link;
}; };
@ -2661,6 +2662,13 @@ spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
} }
} }
static void
_spdk_bdev_desc_free(struct spdk_bdev_desc *desc)
{
pthread_mutex_destroy(&desc->mutex);
free(desc);
}
int int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{ {
@ -4214,17 +4222,27 @@ _remove_notify(void *arg)
{ {
struct spdk_bdev_desc *desc = arg; struct spdk_bdev_desc *desc = arg;
desc->remove_scheduled = false; pthread_mutex_lock(&desc->mutex);
desc->refs--;
if (desc->closed) { if (!desc->closed) {
free(desc); pthread_mutex_unlock(&desc->mutex);
} else {
if (desc->callback.open_with_ext) { if (desc->callback.open_with_ext) {
desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx); desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx);
} else { } else {
desc->callback.remove_fn(desc->callback.ctx); desc->callback.remove_fn(desc->callback.ctx);
} }
return;
} else if (0 == desc->refs) {
/* This descriptor was closed after this remove_notify message was sent.
* spdk_bdev_close() could not free the descriptor since this message was
* in flight, so we free it now using _spdk_bdev_desc_free().
*/
pthread_mutex_unlock(&desc->mutex);
_spdk_bdev_desc_free(desc);
return;
} }
pthread_mutex_unlock(&desc->mutex);
} }
/* Must be called while holding bdev->internal.mutex. /* Must be called while holding bdev->internal.mutex.
@ -4239,17 +4257,16 @@ spdk_bdev_unregister_unsafe(struct spdk_bdev *bdev)
/* Notify each descriptor about hotremoval */ /* Notify each descriptor about hotremoval */
TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) { TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
rc = -EBUSY; rc = -EBUSY;
pthread_mutex_lock(&desc->mutex);
/* /*
* Defer invocation of the event_cb to a separate message that will * Defer invocation of the event_cb to a separate message that will
* run later on its thread. This ensures this context unwinds and * run later on its thread. This ensures this context unwinds and
* we don't recursively unregister this bdev again if the event_cb * we don't recursively unregister this bdev again if the event_cb
* immediately closes its descriptor. * immediately closes its descriptor.
*/ */
if (!desc->remove_scheduled) { desc->refs++;
/* Avoid scheduling removal of the same descriptor multiple times. */ spdk_thread_send_msg(desc->thread, _remove_notify, desc);
desc->remove_scheduled = true; pthread_mutex_unlock(&desc->mutex);
spdk_thread_send_msg(desc->thread, _remove_notify, desc);
}
} }
/* If there are no descriptors, proceed removing the bdev */ /* If there are no descriptors, proceed removing the bdev */
@ -4383,12 +4400,13 @@ spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_
desc->callback.open_with_ext = false; desc->callback.open_with_ext = false;
desc->callback.remove_fn = remove_cb; desc->callback.remove_fn = remove_cb;
desc->callback.ctx = remove_ctx; desc->callback.ctx = remove_ctx;
pthread_mutex_init(&desc->mutex, NULL);
pthread_mutex_lock(&g_bdev_mgr.mutex); pthread_mutex_lock(&g_bdev_mgr.mutex);
rc = _spdk_bdev_open(bdev, write, desc); rc = _spdk_bdev_open(bdev, write, desc);
if (rc != 0) { if (rc != 0) {
free(desc); _spdk_bdev_desc_free(desc);
desc = NULL; desc = NULL;
} }
@ -4432,10 +4450,11 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event
desc->callback.open_with_ext = true; desc->callback.open_with_ext = true;
desc->callback.event_fn = event_cb; desc->callback.event_fn = event_cb;
desc->callback.ctx = event_ctx; desc->callback.ctx = event_ctx;
pthread_mutex_init(&desc->mutex, NULL);
rc = _spdk_bdev_open(bdev, write, desc); rc = _spdk_bdev_open(bdev, write, desc);
if (rc != 0) { if (rc != 0) {
free(desc); _spdk_bdev_desc_free(desc);
desc = NULL; desc = NULL;
} }
@ -4458,13 +4477,17 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
assert(desc->thread == spdk_get_thread()); assert(desc->thread == spdk_get_thread());
pthread_mutex_lock(&bdev->internal.mutex); pthread_mutex_lock(&bdev->internal.mutex);
pthread_mutex_lock(&desc->mutex);
TAILQ_REMOVE(&bdev->internal.open_descs, desc, link); TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
desc->closed = true; desc->closed = true;
if (!desc->remove_scheduled) { if (0 == desc->refs) {
free(desc); pthread_mutex_unlock(&desc->mutex);
_spdk_bdev_desc_free(desc);
} else {
pthread_mutex_unlock(&desc->mutex);
} }
/* If no more descriptors, kill QoS channel */ /* If no more descriptors, kill QoS channel */