bdev: use spinlock instead of mutex

SPDK threads generally run on dedicated cores and locks should be rarely
contended. Thus, putting a thread to sleep while waiting on a mutex does
not free up CPU cycles for other pthreads or processes. Even when
running in interrupt mode, lock contention should be low enough that
spinlocks are a net win by avoiding context switches.
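
The swap is mechanical throughout: each pthread_mutex_t becomes a
pthread_spinlock_t, and the matching init/lock/unlock/destroy calls
change with it. A minimal sketch of the pattern (generic names, not
the SPDK code itself):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_spinlock_t g_lock;
    static uint64_t g_counter;

    /* PTHREAD_PROCESS_PRIVATE: the lock is only used by threads of
     * this process. */
    static void
    counter_init(void)
    {
        pthread_spin_init(&g_lock, PTHREAD_PROCESS_PRIVATE);
    }

    static void
    counter_bump(void)
    {
        /* A waiter spins instead of sleeping; cheap when contention
         * is rare, as argued above. */
        pthread_spin_lock(&g_lock);
        g_counter++;
        pthread_spin_unlock(&g_lock);
    }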

Signed-off-by: Mike Gerdts <mgerdts@nvidia.com>
Change-Id: I6e2e78b2835bbadb56bbec34918d998d75280dfd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15438
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Mike Gerdts, 2022-11-10 21:22:06 -06:00; committed by Tomasz Zawadzki
parent 2be196c609
commit 8dbaca1300
3 changed files with 133 additions and 127 deletions


@@ -489,8 +489,8 @@ struct spdk_bdev {
 		/** True if the state of the QoS is being modified */
 		bool qos_mod_in_progress;

-		/** Mutex protecting claimed */
-		pthread_mutex_t mutex;
+		/** Spin lock protecting claimed */
+		pthread_spinlock_t spinlock;

 		/** The bdev status */
 		enum spdk_bdev_status status;


@@ -93,7 +93,7 @@ struct spdk_bdev_mgr {
 	bool init_complete;
 	bool module_init_complete;
-	pthread_mutex_t mutex;
+	pthread_spinlock_t spinlock;

 #ifdef SPDK_CONFIG_VTUNE
 	__itt_domain *domain;
@@ -106,9 +106,15 @@ static struct spdk_bdev_mgr g_bdev_mgr = {
 	.bdev_names = RB_INITIALIZER(g_bdev_mgr.bdev_names),
 	.init_complete = false,
 	.module_init_complete = false,
-	.mutex = PTHREAD_MUTEX_INITIALIZER,
 };

+static void
+__attribute__((constructor))
+_bdev_init(void)
+{
+	pthread_spin_init(&g_bdev_mgr.spinlock, PTHREAD_PROCESS_PRIVATE);
+}
+
 typedef void (*lock_range_cb)(void *ctx, int status);

 typedef void (*bdev_copy_bounce_buffer_cpl)(void *ctx, int rc);
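
Worth noting in the hunk above: POSIX defines no static initializer
for spinlocks (there is no spinlock counterpart to
PTHREAD_MUTEX_INITIALIZER), so the static initializer assignment is
replaced by a function marked __attribute__((constructor)), which GCC
and Clang run before main(). A standalone sketch of the idiom, with
hypothetical names:

    #include <pthread.h>

    static pthread_spinlock_t g_lock;

    /* Runs before main() (GCC/Clang extension), standing in for the
     * static initializer that spinlocks lack. */
    static void __attribute__((constructor))
    g_lock_init(void)
    {
        pthread_spin_init(&g_lock, PTHREAD_PROCESS_PRIVATE);
    }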
@@ -301,7 +307,7 @@ struct spdk_bdev_desc {
 	bool closed;
 	bool write;
 	bool memory_domains_supported;
-	pthread_mutex_t mutex;
+	pthread_spinlock_t spinlock;
 	uint32_t refs;
 	TAILQ_HEAD(, media_event_entry) pending_media_events;
 	TAILQ_HEAD(, media_event_entry) free_media_events;
@@ -480,9 +486,9 @@ spdk_bdev_get_by_name(const char *bdev_name)
 {
 	struct spdk_bdev *bdev;

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = bdev_get_by_name(bdev_name);
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	return bdev;
 }
@@ -1377,7 +1383,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		}
 	}

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);

 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
 		if (bdev->fn_table->write_config_json) {
@@ -1387,7 +1393,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		bdev_qos_config_json(bdev, w);
 	}

-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	/* This has to be last RPC in array to make sure all bdevs finished examine */
 	spdk_json_write_object_begin(w);
@@ -3197,7 +3203,7 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
 	}
 }

-/* Caller must hold bdev->internal.mutex. */
+/* Caller must hold bdev->internal.spinlock. */
 static void
 bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
 {
@@ -3259,7 +3265,7 @@ struct poll_timeout_ctx {
 static void
 bdev_desc_free(struct spdk_bdev_desc *desc)
 {
-	pthread_mutex_destroy(&desc->mutex);
+	pthread_spin_destroy(&desc->spinlock);
 	free(desc->media_events_buffer);
 	free(desc);
 }
@@ -3272,14 +3278,14 @@ bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status
 	free(ctx);

-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&desc->spinlock);
 	desc->refs--;
 	if (desc->closed == true && desc->refs == 0) {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_mutex_unlock(&desc->mutex);
+	pthread_spin_unlock(&desc->spinlock);
 }

 static void
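
The refs/closed handshake above deserves a word: a descriptor may be
referenced by messages still in flight on other threads, so closing it
only marks it closed, and whichever thread drops the last reference
afterwards frees it. A condensed sketch of the idea (generic names,
not the SPDK code itself):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct desc {
        pthread_spinlock_t lock;
        uint32_t refs;  /* in-flight messages referencing this desc */
        bool closed;    /* set once by the owner's close */
    };

    /* Called by each thread as it finishes an in-flight message. */
    static void
    desc_release(struct desc *d)
    {
        pthread_spin_lock(&d->lock);
        d->refs--;
        if (d->closed && d->refs == 0) {
            /* Last reference after close: safe to free. */
            pthread_spin_unlock(&d->lock);
            pthread_spin_destroy(&d->lock);
            free(d);
            return;
        }
        pthread_spin_unlock(&d->lock);
    }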
@@ -3292,13 +3298,13 @@ bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev
 	struct spdk_bdev_io *bdev_io;
 	uint64_t now;

-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&desc->spinlock);
 	if (desc->closed == true) {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		spdk_bdev_for_each_channel_continue(i, -1);
 		return;
 	}
-	pthread_mutex_unlock(&desc->mutex);
+	pthread_spin_unlock(&desc->spinlock);

 	now = spdk_get_ticks();
 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
@@ -3344,9 +3350,9 @@ bdev_poll_timeout_io(void *arg)
 	/* Take a ref on the descriptor in case it gets closed while we are checking
 	 * all of the channels.
 	 */
-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&desc->spinlock);
 	desc->refs++;
-	pthread_mutex_unlock(&desc->mutex);
+	pthread_spin_unlock(&desc->spinlock);

 	spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx,
 				   bdev_channel_poll_timeout_io_done);
@@ -3468,7 +3474,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 	}
 #endif

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	bdev_enable_qos(bdev, ch);

 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
@@ -3476,7 +3482,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 		new_range = calloc(1, sizeof(*new_range));
 		if (new_range == NULL) {
-			pthread_mutex_unlock(&bdev->internal.mutex);
+			pthread_spin_unlock(&bdev->internal.spinlock);
 			bdev_channel_destroy_resource(ch);
 			return -1;
 		}
@@ -3486,7 +3492,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 		TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
 	}

-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	return 0;
 }
@@ -3686,9 +3692,9 @@ bdev_channel_destroy(void *io_device, void *ctx_buf)
 		      spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));

 	/* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
-	pthread_mutex_lock(&ch->bdev->internal.mutex);
+	pthread_spin_lock(&ch->bdev->internal.spinlock);
 	bdev_io_stat_add(&ch->bdev->internal.stat, &ch->stat);
-	pthread_mutex_unlock(&ch->bdev->internal.mutex);
+	pthread_spin_unlock(&ch->bdev->internal.spinlock);

 	bdev_abort_all_queued_io(&ch->queued_resets, ch);
@@ -3718,9 +3724,9 @@ bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const ch
 	bdev_name->bdev = bdev;

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	if (tmp != NULL) {
 		SPDK_ERRLOG("Bdev name %s already exists\n", name);
@@ -3741,9 +3747,9 @@ bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
 static void
 bdev_name_del(struct spdk_bdev_name *bdev_name)
 {
-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	bdev_name_del_unsafe(bdev_name);
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);
 }

 int
@@ -3891,7 +3897,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
 	memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.qos) {
 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
 			if (bdev->internal.qos->rate_limits[i].limit !=
@@ -3904,7 +3910,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
 			}
 		}
 	}
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
 }

 size_t
@@ -4214,10 +4220,10 @@ _resize_notify(void *arg)
 {
 	struct spdk_bdev_desc *desc = arg;

-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&desc->spinlock);
 	desc->refs--;
 	if (!desc->closed) {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		desc->callback.event_fn(SPDK_BDEV_EVENT_RESIZE,
 					desc->bdev,
 					desc->callback.ctx);
@@ -4227,11 +4233,11 @@ _resize_notify(void *arg)
 		 * spdk_bdev_close() could not free the descriptor since this message was
 		 * in flight, so we free it now using bdev_desc_free().
 		 */
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_mutex_unlock(&desc->mutex);
+	pthread_spin_unlock(&desc->spinlock);
 }

 int
@@ -4244,7 +4250,7 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 		return 0;
 	}

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);

 	/* bdev has open descriptors */
 	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
@@ -4253,17 +4259,17 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 	} else {
 		bdev->blockcnt = size;
 		TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
-			pthread_mutex_lock(&desc->mutex);
+			pthread_spin_lock(&desc->spinlock);
 			if (!desc->closed) {
 				desc->refs++;
 				spdk_thread_send_msg(desc->thread, _resize_notify, desc);
 			}
-			pthread_mutex_unlock(&desc->mutex);
+			pthread_spin_unlock(&desc->spinlock);
 		}
 		ret = 0;
 	}

-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	return ret;
 }
@@ -5489,11 +5495,11 @@ bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bd
 		 * the channel flag is set, so the lock here should not
 		 * be necessary. We're not in the fast path though, so
 		 * just take it anyway. */
-		pthread_mutex_lock(&channel->bdev->internal.mutex);
+		pthread_spin_lock(&channel->bdev->internal.spinlock);
 		if (channel->bdev->internal.qos->ch == channel) {
 			TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
 		}
-		pthread_mutex_unlock(&channel->bdev->internal.mutex);
+		pthread_spin_unlock(&channel->bdev->internal.spinlock);
 	}

 	bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
@@ -5520,7 +5526,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 	assert(!TAILQ_EMPTY(&ch->queued_resets));

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.reset_in_progress == NULL) {
 		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
 		/*
@@ -5533,7 +5539,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
 		bdev_start_reset(ch);
 	}
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
 }

 int
@@ -5556,9 +5562,9 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
 	bdev_io->u.reset.ch_ref = NULL;
 	bdev_io_init(bdev_io, bdev, cb_arg, cb);

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io,
 			  internal.ch_link);
@@ -5620,9 +5626,9 @@ spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat
 	bdev_iostat_ctx->cb_arg = cb_arg;

 	/* Start with the statistics from previously deleted channels. */
-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	bdev_io_stat_add(bdev_iostat_ctx->stat, &bdev->internal.stat);
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	/* Then iterate and add the statistics from each existing channel. */
 	spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx,
@@ -6151,12 +6157,12 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
 			if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
 				SPDK_ERRLOG("NOMEM returned for reset\n");
 			}
-			pthread_mutex_lock(&bdev->internal.mutex);
+			pthread_spin_lock(&bdev->internal.spinlock);
 			if (bdev_io == bdev->internal.reset_in_progress) {
 				bdev->internal.reset_in_progress = NULL;
 				unlock_channels = true;
 			}
-			pthread_mutex_unlock(&bdev->internal.mutex);
+			pthread_spin_unlock(&bdev->internal.spinlock);

 			if (unlock_channels) {
 				spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io,
@@ -6473,7 +6479,7 @@ bdev_register(struct spdk_bdev *bdev)
 	free(bdev_name);

-	pthread_mutex_init(&bdev->internal.mutex, NULL);
+	pthread_spin_init(&bdev->internal.spinlock, PTHREAD_PROCESS_PRIVATE);

 	SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
 	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
@@ -6493,7 +6499,7 @@ bdev_destroy_cb(void *io_device)
 	cb_fn = bdev->internal.unregister_cb;
 	cb_arg = bdev->internal.unregister_ctx;

-	pthread_mutex_destroy(&bdev->internal.mutex);
+	pthread_spin_destroy(&bdev->internal.spinlock);
 	free(bdev->internal.qos);

 	rc = bdev->fn_table->destruct(bdev->ctxt);
@@ -6518,11 +6524,11 @@ _remove_notify(void *arg)
 {
 	struct spdk_bdev_desc *desc = arg;

-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&desc->spinlock);
 	desc->refs--;

 	if (!desc->closed) {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx);
 		return;
 	} else if (0 == desc->refs) {
@@ -6530,14 +6536,14 @@ _remove_notify(void *arg)
 		 * spdk_bdev_close() could not free the descriptor since this message was
 		 * in flight, so we free it now using bdev_desc_free().
 		 */
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_mutex_unlock(&desc->mutex);
+	pthread_spin_unlock(&desc->spinlock);
 }

-/* Must be called while holding g_bdev_mgr.mutex and bdev->internal.mutex.
+/* Must be called while holding g_bdev_mgr.spinlock and bdev->internal.spinlock.
  * returns: 0 - bdev removed and ready to be destructed.
  *          -EBUSY - bdev can't be destructed yet. */
 static int
@@ -6550,7 +6556,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev)
 	/* Notify each descriptor about hotremoval */
 	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
 		rc = -EBUSY;
-		pthread_mutex_lock(&desc->mutex);
+		pthread_spin_lock(&desc->spinlock);
 		/*
 		 * Defer invocation of the event_cb to a separate message that will
 		 * run later on its thread. This ensures this context unwinds and
@@ -6559,7 +6565,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev)
 		 */
 		desc->refs++;
 		spdk_thread_send_msg(desc->thread, _remove_notify, desc);
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 	}

 	/* If there are no descriptors, proceed removing the bdev */
@@ -6600,8 +6606,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
 {
 	int rc;

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	/*
 	 * Set the status to REMOVING after completing to abort channels. Otherwise,
 	 * the last spdk_bdev_close() may call spdk_io_device_unregister() while
@@ -6610,8 +6616,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
 	 */
 	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
 	rc = bdev_unregister_unsafe(bdev);
-	pthread_mutex_unlock(&bdev->internal.mutex);
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	if (rc == 0) {
 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
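
Also visible in these unregister paths: wherever the locks nest, they
are taken in one global order, g_bdev_mgr.spinlock before
bdev->internal.spinlock before desc->spinlock, and released in
reverse. A fixed acquisition order is what keeps nested spinlocks
deadlock-free; a toy illustration with hypothetical names:

    #include <pthread.h>

    static pthread_spinlock_t g_mgr_lock;  /* outermost */
    static pthread_spinlock_t g_dev_lock;  /* innermost */

    static void __attribute__((constructor))
    locks_init(void)
    {
        pthread_spin_init(&g_mgr_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&g_dev_lock, PTHREAD_PROCESS_PRIVATE);
    }

    static void
    nested_update(void)
    {
        /* Acquire in the global order, release in reverse. */
        pthread_spin_lock(&g_mgr_lock);
        pthread_spin_lock(&g_dev_lock);
        /* ... critical section spanning both levels ... */
        pthread_spin_unlock(&g_dev_lock);
        pthread_spin_unlock(&g_mgr_lock);
    }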
@@ -6634,22 +6640,22 @@ spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void
 		return;
 	}

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
-		pthread_mutex_unlock(&g_bdev_mgr.mutex);
+		pthread_spin_unlock(&g_bdev_mgr.spinlock);
 		if (cb_fn) {
 			cb_fn(cb_arg, -EBUSY);
 		}
 		return;
 	}

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
 	bdev->internal.unregister_cb = cb_fn;
 	bdev->internal.unregister_ctx = cb_arg;
-	pthread_mutex_unlock(&bdev->internal.mutex);
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	spdk_bdev_set_qd_sampling_period(bdev, 0);
@@ -6725,30 +6731,30 @@ bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
 	desc->thread = thread;
 	desc->write = write;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		return -ENODEV;
 	}

 	if (write && bdev->internal.claim_module) {
 		SPDK_ERRLOG("Could not open %s - %s module already claimed it\n",
 			    bdev->name, bdev->internal.claim_module->name);
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		return -EPERM;
 	}

 	rc = bdev_start_qos(bdev);
 	if (rc != 0) {
 		SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		return rc;
 	}

 	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);

-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	return 0;
 }
@@ -6772,7 +6778,7 @@ bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *eve
 	desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
 	desc->callback.event_fn = event_cb;
 	desc->callback.ctx = event_ctx;
-	pthread_mutex_init(&desc->mutex, NULL);
+	pthread_spin_init(&desc->spinlock, PTHREAD_PROCESS_PRIVATE);

 	if (bdev->media_events) {
 		desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
@@ -6807,19 +6813,19 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event
 		return -EINVAL;
 	}

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);

 	bdev = bdev_get_by_name(bdev_name);

 	if (bdev == NULL) {
 		SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
-		pthread_mutex_unlock(&g_bdev_mgr.mutex);
+		pthread_spin_unlock(&g_bdev_mgr.spinlock);
 		return -ENODEV;
 	}

 	rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc);
 	if (rc != 0) {
-		pthread_mutex_unlock(&g_bdev_mgr.mutex);
+		pthread_spin_unlock(&g_bdev_mgr.spinlock);
 		return rc;
 	}
@@ -6831,7 +6837,7 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event
 	*_desc = desc;

-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	return rc;
 }
@@ -6841,18 +6847,18 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
 {
 	int rc;

-	pthread_mutex_lock(&bdev->internal.mutex);
-	pthread_mutex_lock(&desc->mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
+	pthread_spin_lock(&desc->spinlock);

 	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);

 	desc->closed = true;

 	if (0 == desc->refs) {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 	} else {
-		pthread_mutex_unlock(&desc->mutex);
+		pthread_spin_unlock(&desc->spinlock);
 	}

 	/* If no more descriptors, kill QoS channel */
@@ -6870,13 +6876,13 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
 		rc = bdev_unregister_unsafe(bdev);
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);

 		if (rc == 0) {
 			spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
 		}
 	} else {
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 	}
 }
@@ -6892,11 +6898,11 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
 	spdk_poller_unregister(&desc->io_timeout_poller);

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);

 	bdev_close(bdev, desc);

-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);
 }

 static void
@@ -6988,7 +6994,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 	assert(fn != NULL);

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = spdk_bdev_first();
 	while (bdev != NULL) {
 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
@@ -7006,11 +7012,11 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 			}
 			break;
 		}
-		pthread_mutex_unlock(&g_bdev_mgr.mutex);
+		pthread_spin_unlock(&g_bdev_mgr.spinlock);

 		rc = fn(ctx, bdev);

-		pthread_mutex_lock(&g_bdev_mgr.mutex);
+		pthread_spin_lock(&g_bdev_mgr.spinlock);
 		tmp = spdk_bdev_next(bdev);
 		bdev_close(bdev, desc);
 		if (rc != 0) {
@@ -7018,7 +7024,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 		}
 		bdev = tmp;
 	}
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	return rc;
 }
@@ -7032,7 +7038,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 	assert(fn != NULL);

-	pthread_mutex_lock(&g_bdev_mgr.mutex);
+	pthread_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = spdk_bdev_first_leaf();
 	while (bdev != NULL) {
 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
@@ -7050,11 +7056,11 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 			}
 			break;
 		}
-		pthread_mutex_unlock(&g_bdev_mgr.mutex);
+		pthread_spin_unlock(&g_bdev_mgr.spinlock);

 		rc = fn(ctx, bdev);

-		pthread_mutex_lock(&g_bdev_mgr.mutex);
+		pthread_spin_lock(&g_bdev_mgr.spinlock);
 		tmp = spdk_bdev_next_leaf(bdev);
 		bdev_close(bdev, desc);
 		if (rc != 0) {
@@ -7062,7 +7068,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 		}
 		bdev = tmp;
 	}
-	pthread_mutex_unlock(&g_bdev_mgr.mutex);
+	pthread_spin_unlock(&g_bdev_mgr.spinlock);

 	return rc;
 }
@@ -7223,9 +7229,9 @@ bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb
 static void
 bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
 {
-	pthread_mutex_lock(&ctx->bdev->internal.mutex);
+	pthread_spin_lock(&ctx->bdev->internal.spinlock);
 	ctx->bdev->internal.qos_mod_in_progress = false;
-	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
+	pthread_spin_unlock(&ctx->bdev->internal.spinlock);

 	if (ctx->cb_fn) {
 		ctx->cb_fn(ctx->cb_arg, status);
@@ -7241,10 +7247,10 @@ bdev_disable_qos_done(void *cb_arg)
 	struct spdk_bdev_io *bdev_io;
 	struct spdk_bdev_qos *qos;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	qos = bdev->internal.qos;
 	bdev->internal.qos = NULL;
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	while (!TAILQ_EMPTY(&qos->queued)) {
 		/* Send queued I/O back to their original thread for resubmission. */
@@ -7280,9 +7286,9 @@ bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status)
 	struct set_qos_limit_ctx *ctx = _ctx;
 	struct spdk_thread *thread;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	thread = bdev->internal.qos->thread;
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	if (thread != NULL) {
 		spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
@@ -7308,9 +7314,9 @@ bdev_update_qos_rate_limit_msg(void *cb_arg)
 	struct set_qos_limit_ctx *ctx = cb_arg;
 	struct spdk_bdev *bdev = ctx->bdev;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	bdev_set_qos_limit_done(ctx, 0);
 }
@@ -7321,9 +7327,9 @@ bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
 {
 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	bdev_enable_qos(bdev, bdev_ch);
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	spdk_bdev_for_each_channel_continue(i, 0);
 }
@@ -7400,9 +7406,9 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 	ctx->cb_arg = cb_arg;
 	ctx->bdev = bdev;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.qos_mod_in_progress) {
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		free(ctx);
 		cb_fn(cb_arg, -EAGAIN);
 		return;
@@ -7425,7 +7431,7 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 		if (bdev->internal.qos == NULL) {
 			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
 			if (!bdev->internal.qos) {
-				pthread_mutex_unlock(&bdev->internal.mutex);
+				pthread_spin_unlock(&bdev->internal.spinlock);
 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
 				bdev_set_qos_limit_done(ctx, -ENOMEM);
 				return;
@@ -7453,13 +7459,13 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 			spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx,
 						   bdev_disable_qos_msg_done);
 		} else {
-			pthread_mutex_unlock(&bdev->internal.mutex);
+			pthread_spin_unlock(&bdev->internal.spinlock);
 			bdev_set_qos_limit_done(ctx, 0);
 			return;
 		}
 	}

-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
 }

 struct spdk_bdev_histogram_ctx {
@@ -7474,9 +7480,9 @@ bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status
 {
 	struct spdk_bdev_histogram_ctx *ctx = _ctx;

-	pthread_mutex_lock(&ctx->bdev->internal.mutex);
+	pthread_spin_lock(&ctx->bdev->internal.spinlock);
 	ctx->bdev->internal.histogram_in_progress = false;
-	pthread_mutex_unlock(&ctx->bdev->internal.mutex);
+	pthread_spin_unlock(&ctx->bdev->internal.spinlock);
 	ctx->cb_fn(ctx->cb_arg, ctx->status);
 	free(ctx);
 }
@@ -7505,9 +7511,9 @@ bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 		spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx,
 					   bdev_histogram_disable_channel_cb);
 	} else {
-		pthread_mutex_lock(&ctx->bdev->internal.mutex);
+		pthread_spin_lock(&ctx->bdev->internal.spinlock);
 		ctx->bdev->internal.histogram_in_progress = false;
-		pthread_mutex_unlock(&ctx->bdev->internal.mutex);
+		pthread_spin_unlock(&ctx->bdev->internal.spinlock);
 		ctx->cb_fn(ctx->cb_arg, ctx->status);
 		free(ctx);
 	}
@@ -7547,16 +7553,16 @@ spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.histogram_in_progress) {
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		free(ctx);
 		cb_fn(cb_arg, -EAGAIN);
 		return;
 	}

 	bdev->internal.histogram_in_progress = true;
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	bdev->internal.histogram_enabled = enable;
@@ -7659,7 +7665,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media
 	assert(bdev->media_events);

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
 		if (desc->write) {
 			break;
@@ -7684,7 +7690,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media
 	rc = event_id;
 out:
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	return rc;
 }
@@ -7693,14 +7699,14 @@ spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
 {
 	struct spdk_bdev_desc *desc;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
 		if (!TAILQ_EMPTY(&desc->pending_media_events)) {
 			desc->callback.event_fn(SPDK_BDEV_EVENT_MEDIA_MANAGEMENT, bdev,
 						desc->callback.ctx);
 		}
 	}
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);
 }

 struct locked_lba_range_ctx {
@@ -7878,7 +7884,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
 		/* There is an active lock overlapping with this range.
 		 * Put it on the pending list until this range no
@@ -7889,7 +7895,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
 		bdev_lock_lba_range_ctx(bdev, ctx);
 	}
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	return 0;
 }
@@ -7908,7 +7914,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 	struct locked_lba_range_ctx *pending_ctx;
 	struct lba_range *range, *tmp;

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	/* Check if there are any pending locked ranges that overlap with this range
 	 * that was just unlocked. If there are, check that it doesn't overlap with any
 	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
@@ -7924,7 +7930,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 					     bdev_lock_lba_range_ctx_msg, pending_ctx);
 		}
 	}
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	ctx->cb_fn(ctx->cb_arg, status);
 	free(ctx);
@@ -8001,7 +8007,7 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 		return -EINVAL;
 	}

-	pthread_mutex_lock(&bdev->internal.mutex);
+	pthread_spin_lock(&bdev->internal.spinlock);
 	/* We confirmed that this channel has locked the specified range. To
 	 * start the unlock the process, we find the range in the bdev's locked_ranges
 	 * and remove it. This ensures new channels don't inherit the locked range.
@@ -8016,12 +8022,12 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 	}
 	if (range == NULL) {
 		assert(false);
-		pthread_mutex_unlock(&bdev->internal.mutex);
+		pthread_spin_unlock(&bdev->internal.spinlock);
 		return -EINVAL;
 	}

 	TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
 	ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
-	pthread_mutex_unlock(&bdev->internal.mutex);
+	pthread_spin_unlock(&bdev->internal.spinlock);

 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;


@@ -921,7 +921,7 @@ io_valid_test(void)
 	memset(&bdev, 0, sizeof(bdev));
 	bdev.blocklen = 512;

-	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);
+	CU_ASSERT(pthread_spin_init(&bdev.internal.spinlock, PTHREAD_PROCESS_PRIVATE) == 0);

 	spdk_bdev_notify_blockcnt_change(&bdev, 100);
@@ -940,7 +940,7 @@ io_valid_test(void)
 	/* Offset near end of uint64_t range (2^64 - 1) */
 	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

-	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
+	CU_ASSERT(pthread_spin_destroy(&bdev.internal.spinlock) == 0);
 }

 static void
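
One detail the test relies on: pthread_spin_init() and
pthread_spin_destroy() return 0 on success and an error number on
failure, which is what the CU_ASSERT(... == 0) checks above verify.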