bdev: use SPDK spinlocks

Transition from pthread spinlocks to SPDK spinlocks for improved error
checking.

Signed-off-by: Mike Gerdts <mgerdts@nvidia.com>
Change-Id: I7877c3a4601d7d5cf03e632df493974f97782272
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15439
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Mike Gerdts 2022-11-14 16:50:09 -06:00 committed by Jim Harris
parent 7722996dd2
commit 0dc6aac101
4 changed files with 128 additions and 124 deletions
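In short, members declared as pthread_spinlock_t become struct spdk_spinlock, and the pthread_spin_init/lock/unlock/destroy calls become their spdk_spin_* counterparts, which validate usage (for example, that the caller is an SPDK thread and that the lock is not already held by that thread). A minimal sketch of the resulting pattern, using a hypothetical example_ctx structure rather than any type touched by this change:

	#include "spdk/thread.h"

	/* Hypothetical context used only for this sketch. */
	struct example_ctx {
		struct spdk_spinlock lock;
		uint64_t counter;
	};

	static void
	example_ctx_init(struct example_ctx *ctx)
	{
		/* Unlike pthread_spin_init(), there is no pshared argument and no
		 * return code to check; misuse is reported by aborting. */
		spdk_spin_init(&ctx->lock);
	}

	static void
	example_ctx_bump(struct example_ctx *ctx)
	{
		/* Must run on an SPDK thread; spdk_spin_lock() asserts otherwise. */
		spdk_spin_lock(&ctx->lock);
		ctx->counter++;
		spdk_spin_unlock(&ctx->lock);
	}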


@@ -36,6 +36,10 @@ device.
 A new API `spdk_bdev_channel_get_histogram` was added to get the histogram of a specified
 channel for a bdev.
 
+Converted internal use of `pthread_mutex_t` to `struct spdk_spinlock`. Consumers of bdev
+API functions must be on an SPDK thread or the program will abort. It is now enforced
+that no internal bdev locks can be held when a poller or message goes off CPU.
+
 ### event
 
 Added core lock file mechanism to prevent the same CPU cores from being used by multiple
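The last sentence of the bdev entry above is the behavioral change most likely to affect callers: a lock taken on an SPDK thread must be released before the poller or message callback that took it returns, rather than being parked across a yield. A hedged illustration of that discipline, with hypothetical names not taken from this diff (the enforcement itself lives in the SPDK thread library):

	#include "spdk/thread.h"

	/* Hypothetical context used only for this sketch. */
	struct example_ctx {
		struct spdk_spinlock lock;
		uint64_t events;
	};

	static int
	example_poller(void *arg)
	{
		struct example_ctx *ctx = arg;

		/* Take and release the lock entirely inside the poller callback.
		 * Returning to the reactor while still holding an SPDK spinlock
		 * trips the new checks and aborts the program. */
		spdk_spin_lock(&ctx->lock);
		ctx->events++;
		spdk_spin_unlock(&ctx->lock);

		return SPDK_POLLER_BUSY;
	}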


@@ -490,7 +490,7 @@ struct spdk_bdev {
 	bool qos_mod_in_progress;
 
 	/** Spin lock protecting claimed */
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 
 	/** The bdev status */
 	enum spdk_bdev_status status;


@@ -93,7 +93,7 @@ struct spdk_bdev_mgr {
 	bool init_complete;
 	bool module_init_complete;
 
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 
 #ifdef SPDK_CONFIG_VTUNE
 	__itt_domain *domain;
@@ -112,7 +112,7 @@ static void
 __attribute__((constructor))
 _bdev_init(void)
 {
-	pthread_spin_init(&g_bdev_mgr.spinlock, PTHREAD_PROCESS_PRIVATE);
+	spdk_spin_init(&g_bdev_mgr.spinlock);
 }
 
 typedef void (*lock_range_cb)(void *ctx, int status);
@@ -307,7 +307,7 @@ struct spdk_bdev_desc {
 	bool closed;
 	bool write;
 	bool memory_domains_supported;
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 	uint32_t refs;
 	TAILQ_HEAD(, media_event_entry) pending_media_events;
 	TAILQ_HEAD(, media_event_entry) free_media_events;
@@ -486,9 +486,9 @@ spdk_bdev_get_by_name(const char *bdev_name)
 {
 	struct spdk_bdev *bdev;
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = bdev_get_by_name(bdev_name);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	return bdev;
 }
@@ -1384,7 +1384,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		}
 	}
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 
 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
 		if (bdev->fn_table->write_config_json) {
@@ -1394,7 +1394,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		bdev_qos_config_json(bdev, w);
 	}
 
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	/* This has to be last RPC in array to make sure all bdevs finished examine */
 	spdk_json_write_object_begin(w);
@@ -3267,7 +3267,7 @@ struct poll_timeout_ctx {
 static void
 bdev_desc_free(struct spdk_bdev_desc *desc)
 {
-	pthread_spin_destroy(&desc->spinlock);
+	spdk_spin_destroy(&desc->spinlock);
 	free(desc->media_events_buffer);
 	free(desc);
 }
@@ -3280,14 +3280,14 @@ bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status
 
 	free(ctx);
 
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&desc->spinlock);
 	desc->refs--;
 	if (desc->closed == true && desc->refs == 0) {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_spin_unlock(&desc->spinlock);
+	spdk_spin_unlock(&desc->spinlock);
 }
 
 static void
@@ -3300,13 +3300,13 @@ bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev
 	struct spdk_bdev_io *bdev_io;
 	uint64_t now;
 
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&desc->spinlock);
 	if (desc->closed == true) {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		spdk_bdev_for_each_channel_continue(i, -1);
 		return;
 	}
-	pthread_spin_unlock(&desc->spinlock);
+	spdk_spin_unlock(&desc->spinlock);
 
 	now = spdk_get_ticks();
 	TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
@@ -3352,9 +3352,9 @@ bdev_poll_timeout_io(void *arg)
 	/* Take a ref on the descriptor in case it gets closed while we are checking
 	 * all of the channels.
 	 */
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&desc->spinlock);
 	desc->refs++;
-	pthread_spin_unlock(&desc->spinlock);
+	spdk_spin_unlock(&desc->spinlock);
 
 	spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx,
 				   bdev_channel_poll_timeout_io_done);
@@ -3476,7 +3476,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 	}
 #endif
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	bdev_enable_qos(bdev, ch);
 
 	TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
@@ -3484,7 +3484,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 
 		new_range = calloc(1, sizeof(*new_range));
 		if (new_range == NULL) {
-			pthread_spin_unlock(&bdev->internal.spinlock);
+			spdk_spin_unlock(&bdev->internal.spinlock);
 			bdev_channel_destroy_resource(ch);
 			return -1;
 		}
@@ -3494,7 +3494,7 @@ bdev_channel_create(void *io_device, void *ctx_buf)
 		TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
 	}
 
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	return 0;
 }
@@ -3694,9 +3694,9 @@ bdev_channel_destroy(void *io_device, void *ctx_buf)
 		      spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
 
 	/* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
-	pthread_spin_lock(&ch->bdev->internal.spinlock);
+	spdk_spin_lock(&ch->bdev->internal.spinlock);
 	bdev_io_stat_add(&ch->bdev->internal.stat, &ch->stat);
-	pthread_spin_unlock(&ch->bdev->internal.spinlock);
+	spdk_spin_unlock(&ch->bdev->internal.spinlock);
 
 	bdev_abort_all_queued_io(&ch->queued_resets, ch);
 
@@ -3726,9 +3726,9 @@ bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const ch
 
 	bdev_name->bdev = bdev;
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	if (tmp != NULL) {
 		SPDK_ERRLOG("Bdev name %s already exists\n", name);
@@ -3749,9 +3749,9 @@ bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
 static void
 bdev_name_del(struct spdk_bdev_name *bdev_name)
 {
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	bdev_name_del_unsafe(bdev_name);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 }
 
 int
@@ -3899,7 +3899,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
 
 	memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.qos) {
 		for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
 			if (bdev->internal.qos->rate_limits[i].limit !=
@@ -3912,7 +3912,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
 			}
 		}
 	}
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 }
 
 size_t
@@ -4222,10 +4222,10 @@ _resize_notify(void *arg)
 {
 	struct spdk_bdev_desc *desc = arg;
 
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&desc->spinlock);
 	desc->refs--;
 	if (!desc->closed) {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		desc->callback.event_fn(SPDK_BDEV_EVENT_RESIZE,
 					desc->bdev,
 					desc->callback.ctx);
@@ -4235,11 +4235,11 @@ _resize_notify(void *arg)
 		 * spdk_bdev_close() could not free the descriptor since this message was
 		 * in flight, so we free it now using bdev_desc_free().
 		 */
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_spin_unlock(&desc->spinlock);
+	spdk_spin_unlock(&desc->spinlock);
 }
 
 int
@@ -4252,7 +4252,7 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 		return 0;
 	}
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 
 	/* bdev has open descriptors */
 	if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
@@ -4261,17 +4261,17 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
 	} else {
 		bdev->blockcnt = size;
 		TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
-			pthread_spin_lock(&desc->spinlock);
+			spdk_spin_lock(&desc->spinlock);
 			if (!desc->closed) {
 				desc->refs++;
 				spdk_thread_send_msg(desc->thread, _resize_notify, desc);
 			}
-			pthread_spin_unlock(&desc->spinlock);
+			spdk_spin_unlock(&desc->spinlock);
 		}
 		ret = 0;
 	}
 
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	return ret;
 }
@@ -5497,11 +5497,11 @@ bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bd
 		 * the channel flag is set, so the lock here should not
 		 * be necessary. We're not in the fast path though, so
 		 * just take it anyway. */
-		pthread_spin_lock(&channel->bdev->internal.spinlock);
+		spdk_spin_lock(&channel->bdev->internal.spinlock);
 		if (channel->bdev->internal.qos->ch == channel) {
 			TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link);
 		}
-		pthread_spin_unlock(&channel->bdev->internal.spinlock);
+		spdk_spin_unlock(&channel->bdev->internal.spinlock);
 	}
 
 	bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
@@ -5528,7 +5528,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 
 	assert(!TAILQ_EMPTY(&ch->queued_resets));
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.reset_in_progress == NULL) {
 		bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets);
 		/*
@@ -5541,7 +5541,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch)
 		bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
 		bdev_start_reset(ch);
 	}
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 }
 
 int
@@ -5564,9 +5564,9 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
 	bdev_io->u.reset.ch_ref = NULL;
 	bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link);
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io,
 			  internal.ch_link);
@@ -5628,9 +5628,9 @@ spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat
 	bdev_iostat_ctx->cb_arg = cb_arg;
 
 	/* Start with the statistics from previously deleted channels. */
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	bdev_io_stat_add(bdev_iostat_ctx->stat, &bdev->internal.stat);
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	/* Then iterate and add the statistics from each existing channel. */
 	spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx,
@@ -6159,12 +6159,12 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
 		if (status == SPDK_BDEV_IO_STATUS_NOMEM) {
 			SPDK_ERRLOG("NOMEM returned for reset\n");
 		}
-		pthread_spin_lock(&bdev->internal.spinlock);
+		spdk_spin_lock(&bdev->internal.spinlock);
 		if (bdev_io == bdev->internal.reset_in_progress) {
 			bdev->internal.reset_in_progress = NULL;
 			unlock_channels = true;
 		}
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 
 		if (unlock_channels) {
 			spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io,
@@ -6481,7 +6481,7 @@ bdev_register(struct spdk_bdev *bdev)
 
 	free(bdev_name);
 
-	pthread_spin_init(&bdev->internal.spinlock, PTHREAD_PROCESS_PRIVATE);
+	spdk_spin_init(&bdev->internal.spinlock);
 
 	SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
 	TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
@@ -6501,7 +6501,7 @@ bdev_destroy_cb(void *io_device)
 	cb_fn = bdev->internal.unregister_cb;
 	cb_arg = bdev->internal.unregister_ctx;
 
-	pthread_spin_destroy(&bdev->internal.spinlock);
+	spdk_spin_destroy(&bdev->internal.spinlock);
 	free(bdev->internal.qos);
 
 	rc = bdev->fn_table->destruct(bdev->ctxt);
@@ -6526,11 +6526,11 @@ _remove_notify(void *arg)
 {
 	struct spdk_bdev_desc *desc = arg;
 
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&desc->spinlock);
 	desc->refs--;
 
 	if (!desc->closed) {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx);
 		return;
 	} else if (0 == desc->refs) {
@@ -6538,11 +6538,11 @@ _remove_notify(void *arg)
 		 * spdk_bdev_close() could not free the descriptor since this message was
 		 * in flight, so we free it now using bdev_desc_free().
 		 */
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 		return;
 	}
-	pthread_spin_unlock(&desc->spinlock);
+	spdk_spin_unlock(&desc->spinlock);
 }
 
 /* Must be called while holding g_bdev_mgr.spinlock and bdev->internal.spinlock.
@@ -6558,7 +6558,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev)
 	/* Notify each descriptor about hotremoval */
 	TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
 		rc = -EBUSY;
-		pthread_spin_lock(&desc->spinlock);
+		spdk_spin_lock(&desc->spinlock);
 		/*
 		 * Defer invocation of the event_cb to a separate message that will
 		 * run later on its thread. This ensures this context unwinds and
@@ -6567,7 +6567,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev)
 		 */
 		desc->refs++;
 		spdk_thread_send_msg(desc->thread, _remove_notify, desc);
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 	}
 
 	/* If there are no descriptors, proceed removing the bdev */
@@ -6608,8 +6608,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
 {
 	int rc;
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	/*
 	 * Set the status to REMOVING after completing to abort channels. Otherwise,
 	 * the last spdk_bdev_close() may call spdk_io_device_unregister() while
@@ -6618,8 +6618,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
 	 */
 	bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
 	rc = bdev_unregister_unsafe(bdev);
-	pthread_spin_unlock(&bdev->internal.spinlock);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	if (rc == 0) {
 		spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
@@ -6642,22 +6642,22 @@ spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void
 		return;
 	}
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
-		pthread_spin_unlock(&g_bdev_mgr.spinlock);
+		spdk_spin_unlock(&g_bdev_mgr.spinlock);
 		if (cb_fn) {
 			cb_fn(cb_arg, -EBUSY);
 		}
 		return;
 	}
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
 	bdev->internal.unregister_cb = cb_fn;
 	bdev->internal.unregister_ctx = cb_arg;
-	pthread_spin_unlock(&bdev->internal.spinlock);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	spdk_bdev_set_qd_sampling_period(bdev, 0);
 
@@ -6733,30 +6733,30 @@ bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
 	desc->thread = thread;
 	desc->write = write;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
 	    bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		return -ENODEV;
 	}
 
 	if (write && bdev->internal.claim_module) {
 		SPDK_ERRLOG("Could not open %s - %s module already claimed it\n",
 			    bdev->name, bdev->internal.claim_module->name);
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		return -EPERM;
 	}
 
 	rc = bdev_start_qos(bdev);
 	if (rc != 0) {
 		SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		return rc;
 	}
 
 	TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
 
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	return 0;
 }
@@ -6780,7 +6780,7 @@ bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *eve
 	desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
 	desc->callback.event_fn = event_cb;
 	desc->callback.ctx = event_ctx;
-	pthread_spin_init(&desc->spinlock, PTHREAD_PROCESS_PRIVATE);
+	spdk_spin_init(&desc->spinlock);
 
 	if (bdev->media_events) {
 		desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
@@ -6815,19 +6815,19 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event
 		return -EINVAL;
 	}
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 
 	bdev = bdev_get_by_name(bdev_name);
 
 	if (bdev == NULL) {
 		SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
-		pthread_spin_unlock(&g_bdev_mgr.spinlock);
+		spdk_spin_unlock(&g_bdev_mgr.spinlock);
 		return -ENODEV;
 	}
 
 	rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc);
 	if (rc != 0) {
-		pthread_spin_unlock(&g_bdev_mgr.spinlock);
+		spdk_spin_unlock(&g_bdev_mgr.spinlock);
 		return rc;
 	}
 
@@ -6839,7 +6839,7 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event
 
 	*_desc = desc;
 
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	return rc;
 }
@@ -6849,18 +6849,18 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
 {
 	int rc;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
-	pthread_spin_lock(&desc->spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&desc->spinlock);
 
 	TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
 
 	desc->closed = true;
 
 	if (0 == desc->refs) {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 		bdev_desc_free(desc);
 	} else {
-		pthread_spin_unlock(&desc->spinlock);
+		spdk_spin_unlock(&desc->spinlock);
 	}
 
 	/* If no more descriptors, kill QoS channel */
@@ -6878,13 +6878,13 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
 
 	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
 		rc = bdev_unregister_unsafe(bdev);
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 
 		if (rc == 0) {
 			spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
 		}
 	} else {
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 	}
 }
 
@@ -6900,11 +6900,11 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
 
 	spdk_poller_unregister(&desc->io_timeout_poller);
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 
 	bdev_close(bdev, desc);
 
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 }
 
 static void
@@ -6996,7 +6996,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 
 	assert(fn != NULL);
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = spdk_bdev_first();
 	while (bdev != NULL) {
 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
@@ -7014,11 +7014,11 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 			}
 			break;
 		}
-		pthread_spin_unlock(&g_bdev_mgr.spinlock);
+		spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 		rc = fn(ctx, bdev);
 
-		pthread_spin_lock(&g_bdev_mgr.spinlock);
+		spdk_spin_lock(&g_bdev_mgr.spinlock);
 		tmp = spdk_bdev_next(bdev);
 		bdev_close(bdev, desc);
 		if (rc != 0) {
@@ -7026,7 +7026,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
 		}
 		bdev = tmp;
 	}
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	return rc;
 }
@@ -7040,7 +7040,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 
 	assert(fn != NULL);
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = spdk_bdev_first_leaf();
 	while (bdev != NULL) {
 		rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
@@ -7058,11 +7058,11 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 			}
 			break;
 		}
-		pthread_spin_unlock(&g_bdev_mgr.spinlock);
+		spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 		rc = fn(ctx, bdev);
 
-		pthread_spin_lock(&g_bdev_mgr.spinlock);
+		spdk_spin_lock(&g_bdev_mgr.spinlock);
 		tmp = spdk_bdev_next_leaf(bdev);
 		bdev_close(bdev, desc);
 		if (rc != 0) {
@@ -7070,7 +7070,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
 		}
 		bdev = tmp;
 	}
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	return rc;
 }
@@ -7231,9 +7231,9 @@ bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb
 static void
 bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
 {
-	pthread_spin_lock(&ctx->bdev->internal.spinlock);
+	spdk_spin_lock(&ctx->bdev->internal.spinlock);
 	ctx->bdev->internal.qos_mod_in_progress = false;
-	pthread_spin_unlock(&ctx->bdev->internal.spinlock);
+	spdk_spin_unlock(&ctx->bdev->internal.spinlock);
 
 	if (ctx->cb_fn) {
 		ctx->cb_fn(ctx->cb_arg, status);
@@ -7249,10 +7249,10 @@ bdev_disable_qos_done(void *cb_arg)
 	struct spdk_bdev_io *bdev_io;
 	struct spdk_bdev_qos *qos;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	qos = bdev->internal.qos;
 	bdev->internal.qos = NULL;
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	while (!TAILQ_EMPTY(&qos->queued)) {
 		/* Send queued I/O back to their original thread for resubmission. */
@@ -7288,9 +7288,9 @@ bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status)
 	struct set_qos_limit_ctx *ctx = _ctx;
 	struct spdk_thread *thread;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	thread = bdev->internal.qos->thread;
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	if (thread != NULL) {
 		spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
@@ -7316,9 +7316,9 @@ bdev_update_qos_rate_limit_msg(void *cb_arg)
 	struct set_qos_limit_ctx *ctx = cb_arg;
 	struct spdk_bdev *bdev = ctx->bdev;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	bdev_set_qos_limit_done(ctx, 0);
 }
@@ -7329,9 +7329,9 @@ bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
 {
 	struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	bdev_enable_qos(bdev, bdev_ch);
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	spdk_bdev_for_each_channel_continue(i, 0);
 }
@@ -7408,9 +7408,9 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 	ctx->cb_arg = cb_arg;
 	ctx->bdev = bdev;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.qos_mod_in_progress) {
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		free(ctx);
 		cb_fn(cb_arg, -EAGAIN);
 		return;
@@ -7433,7 +7433,7 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 		if (bdev->internal.qos == NULL) {
 			bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
 			if (!bdev->internal.qos) {
-				pthread_spin_unlock(&bdev->internal.spinlock);
+				spdk_spin_unlock(&bdev->internal.spinlock);
 				SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
 				bdev_set_qos_limit_done(ctx, -ENOMEM);
 				return;
@@ -7461,13 +7461,13 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
 			spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx,
 						   bdev_disable_qos_msg_done);
 		} else {
-			pthread_spin_unlock(&bdev->internal.spinlock);
+			spdk_spin_unlock(&bdev->internal.spinlock);
 			bdev_set_qos_limit_done(ctx, 0);
 			return;
 		}
 	}
 
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 }
 
 struct spdk_bdev_histogram_ctx {
@@ -7482,9 +7482,9 @@ bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status
 {
 	struct spdk_bdev_histogram_ctx *ctx = _ctx;
 
-	pthread_spin_lock(&ctx->bdev->internal.spinlock);
+	spdk_spin_lock(&ctx->bdev->internal.spinlock);
 	ctx->bdev->internal.histogram_in_progress = false;
-	pthread_spin_unlock(&ctx->bdev->internal.spinlock);
+	spdk_spin_unlock(&ctx->bdev->internal.spinlock);
 	ctx->cb_fn(ctx->cb_arg, ctx->status);
 	free(ctx);
 }
@@ -7513,9 +7513,9 @@ bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 		spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx,
 					   bdev_histogram_disable_channel_cb);
 	} else {
-		pthread_spin_lock(&ctx->bdev->internal.spinlock);
+		spdk_spin_lock(&ctx->bdev->internal.spinlock);
 		ctx->bdev->internal.histogram_in_progress = false;
-		pthread_spin_unlock(&ctx->bdev->internal.spinlock);
+		spdk_spin_unlock(&ctx->bdev->internal.spinlock);
 		ctx->cb_fn(ctx->cb_arg, ctx->status);
 		free(ctx);
 	}
@@ -7555,16 +7555,16 @@ spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev->internal.histogram_in_progress) {
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		free(ctx);
 		cb_fn(cb_arg, -EAGAIN);
 		return;
 	}
 
 	bdev->internal.histogram_in_progress = true;
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	bdev->internal.histogram_enabled = enable;
 
@@ -7682,7 +7682,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media
 
 	assert(bdev->media_events);
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
 		if (desc->write) {
 			break;
@@ -7707,7 +7707,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media
 
 	rc = event_id;
 out:
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	return rc;
 }
@@ -7716,14 +7716,14 @@ spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
 {
 	struct spdk_bdev_desc *desc;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
 		if (!TAILQ_EMPTY(&desc->pending_media_events)) {
 			desc->callback.event_fn(SPDK_BDEV_EVENT_MEDIA_MANAGEMENT, bdev,
 						desc->callback.ctx);
 		}
 	}
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 }
 
 struct locked_lba_range_ctx {
@@ -7901,7 +7901,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
 		/* There is an active lock overlapping with this range.
 		 * Put it on the pending list until this range no
@@ -7912,7 +7912,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 		TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
 		bdev_lock_lba_range_ctx(bdev, ctx);
 	}
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	return 0;
 }
@@ -7931,7 +7931,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 	struct locked_lba_range_ctx *pending_ctx;
 	struct lba_range *range, *tmp;
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	/* Check if there are any pending locked ranges that overlap with this range
 	 * that was just unlocked. If there are, check that it doesn't overlap with any
 	 * other locked ranges before calling bdev_lock_lba_range_ctx which will start
@@ -7947,7 +7947,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
 					     bdev_lock_lba_range_ctx_msg, pending_ctx);
 		}
 	}
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	ctx->cb_fn(ctx->cb_arg, status);
 	free(ctx);
@@ -8024,7 +8024,7 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 		return -EINVAL;
 	}
 
-	pthread_spin_lock(&bdev->internal.spinlock);
+	spdk_spin_lock(&bdev->internal.spinlock);
 	/* We confirmed that this channel has locked the specified range. To
 	 * start the unlock the process, we find the range in the bdev's locked_ranges
 	 * and remove it. This ensures new channels don't inherit the locked range.
@@ -8039,12 +8039,12 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 	}
 	if (range == NULL) {
 		assert(false);
-		pthread_spin_unlock(&bdev->internal.spinlock);
+		spdk_spin_unlock(&bdev->internal.spinlock);
 		return -EINVAL;
 	}
 
 	TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
 	ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
-	pthread_spin_unlock(&bdev->internal.spinlock);
+	spdk_spin_unlock(&bdev->internal.spinlock);
 
 	ctx->cb_fn = cb_fn;
 	ctx->cb_arg = cb_arg;


@@ -921,7 +921,7 @@ io_valid_test(void)
 
 	memset(&bdev, 0, sizeof(bdev));
 	bdev.blocklen = 512;
-	CU_ASSERT(pthread_spin_init(&bdev.internal.spinlock, PTHREAD_PROCESS_PRIVATE) == 0);
+	spdk_spin_init(&bdev.internal.spinlock);
 
 	spdk_bdev_notify_blockcnt_change(&bdev, 100);
 
@@ -940,7 +940,7 @@ io_valid_test(void)
 	/* Offset near end of uint64_t range (2^64 - 1) */
 	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
 
-	CU_ASSERT(pthread_spin_destroy(&bdev.internal.spinlock) == 0);
+	spdk_spin_destroy(&bdev.internal.spinlock);
 }
 
 static void