From 0dc6aac101a28bf593a1497a1f0479608b53660c Mon Sep 17 00:00:00 2001
From: Mike Gerdts
Date: Mon, 14 Nov 2022 16:50:09 -0600
Subject: [PATCH] bdev: use SPDK spinlocks

Transition from pthread spinlocks to SPDK spinlocks for improved error checking.

Signed-off-by: Mike Gerdts
Change-Id: I7877c3a4601d7d5cf03e632df493974f97782272
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15439
Tested-by: SPDK CI Jenkins
Reviewed-by: Aleksey Marchuk
Reviewed-by: Jim Harris
Reviewed-by: Shuhei Matsumoto
---
 CHANGELOG.md                        |   4 +
 include/spdk/bdev_module.h          |   2 +-
 lib/bdev/bdev.c                     | 242 ++++++++++++++--------
 test/unit/lib/bdev/bdev.c/bdev_ut.c |   4 +-
 4 files changed, 128 insertions(+), 124 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa5ab6d0f..ea3820587 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,10 @@ device.
 A new API `spdk_bdev_channel_get_histogram` was added to get the histogram of a
 specified channel for a bdev.
 
+Converted internal use of `pthread_spinlock_t` to `struct spdk_spinlock`. Consumers of bdev
+API functions must be on an SPDK thread or the program will abort. It is now enforced
+that no internal bdev locks can be held when a poller or message goes off CPU.
+
 ### event
 
 Added core lock file mechanism to prevent the same CPU cores from being used by multiple
diff --git a/include/spdk/bdev_module.h b/include/spdk/bdev_module.h
index fc70278e1..467ae3e66 100644
--- a/include/spdk/bdev_module.h
+++ b/include/spdk/bdev_module.h
@@ -490,7 +490,7 @@ struct spdk_bdev {
 	bool qos_mod_in_progress;
 
 	/** Spin lock protecting claimed */
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 
 	/** The bdev status */
 	enum spdk_bdev_status status;
diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index 713fc3e00..76cc32852 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -93,7 +93,7 @@ struct spdk_bdev_mgr {
 	bool init_complete;
 	bool module_init_complete;
 
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 
 #ifdef SPDK_CONFIG_VTUNE
 	__itt_domain *domain;
@@ -112,7 +112,7 @@ static void
 __attribute__((constructor))
 _bdev_init(void)
 {
-	pthread_spin_init(&g_bdev_mgr.spinlock, PTHREAD_PROCESS_PRIVATE);
+	spdk_spin_init(&g_bdev_mgr.spinlock);
 }
 
 typedef void (*lock_range_cb)(void *ctx, int status);
@@ -307,7 +307,7 @@ struct spdk_bdev_desc {
 	bool closed;
 	bool write;
 	bool memory_domains_supported;
-	pthread_spinlock_t spinlock;
+	struct spdk_spinlock spinlock;
 	uint32_t refs;
 	TAILQ_HEAD(, media_event_entry) pending_media_events;
 	TAILQ_HEAD(, media_event_entry) free_media_events;
@@ -486,9 +486,9 @@ spdk_bdev_get_by_name(const char *bdev_name)
 {
 	struct spdk_bdev *bdev;
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 	bdev = bdev_get_by_name(bdev_name);
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	return bdev;
 }
@@ -1384,7 +1384,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		}
 	}
 
-	pthread_spin_lock(&g_bdev_mgr.spinlock);
+	spdk_spin_lock(&g_bdev_mgr.spinlock);
 
 	TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
 		if (bdev->fn_table->write_config_json) {
@@ -1394,7 +1394,7 @@ spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
 		bdev_qos_config_json(bdev, w);
 	}
 
-	pthread_spin_unlock(&g_bdev_mgr.spinlock);
+	spdk_spin_unlock(&g_bdev_mgr.spinlock);
 
 	/* This has to be last RPC in array to make sure all bdevs finished examine */
 	spdk_json_write_object_begin(w);
@@ -3267,7 +3267,7 @@ struct poll_timeout_ctx {
 static void
 bdev_desc_free(struct spdk_bdev_desc
*desc) { - pthread_spin_destroy(&desc->spinlock); + spdk_spin_destroy(&desc->spinlock); free(desc->media_events_buffer); free(desc); } @@ -3280,14 +3280,14 @@ bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status free(ctx); - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); desc->refs--; if (desc->closed == true && desc->refs == 0) { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); bdev_desc_free(desc); return; } - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } static void @@ -3300,13 +3300,13 @@ bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev struct spdk_bdev_io *bdev_io; uint64_t now; - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); if (desc->closed == true) { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); spdk_bdev_for_each_channel_continue(i, -1); return; } - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); now = spdk_get_ticks(); TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) { @@ -3352,9 +3352,9 @@ bdev_poll_timeout_io(void *arg) /* Take a ref on the descriptor in case it gets closed while we are checking * all of the channels. */ - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); desc->refs++; - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx, bdev_channel_poll_timeout_io_done); @@ -3476,7 +3476,7 @@ bdev_channel_create(void *io_device, void *ctx_buf) } #endif - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); bdev_enable_qos(bdev, ch); TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) { @@ -3484,7 +3484,7 @@ bdev_channel_create(void *io_device, void *ctx_buf) new_range = calloc(1, sizeof(*new_range)); if (new_range == NULL) { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); bdev_channel_destroy_resource(ch); return -1; } @@ -3494,7 +3494,7 @@ bdev_channel_create(void *io_device, void *ctx_buf) TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq); } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return 0; } @@ -3694,9 +3694,9 @@ bdev_channel_destroy(void *io_device, void *ctx_buf) spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel))); /* This channel is going away, so add its statistics into the bdev so that they don't get lost. 
*/ - pthread_spin_lock(&ch->bdev->internal.spinlock); + spdk_spin_lock(&ch->bdev->internal.spinlock); bdev_io_stat_add(&ch->bdev->internal.stat, &ch->stat); - pthread_spin_unlock(&ch->bdev->internal.spinlock); + spdk_spin_unlock(&ch->bdev->internal.spinlock); bdev_abort_all_queued_io(&ch->queued_resets, ch); @@ -3726,9 +3726,9 @@ bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const ch bdev_name->bdev = bdev; - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); if (tmp != NULL) { SPDK_ERRLOG("Bdev name %s already exists\n", name); @@ -3749,9 +3749,9 @@ bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name) static void bdev_name_del(struct spdk_bdev_name *bdev_name) { - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); bdev_name_del_unsafe(bdev_name); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); } int @@ -3899,7 +3899,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits) memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev->internal.qos) { for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) { if (bdev->internal.qos->rate_limits[i].limit != @@ -3912,7 +3912,7 @@ spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits) } } } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); } size_t @@ -4222,10 +4222,10 @@ _resize_notify(void *arg) { struct spdk_bdev_desc *desc = arg; - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); desc->refs--; if (!desc->closed) { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); desc->callback.event_fn(SPDK_BDEV_EVENT_RESIZE, desc->bdev, desc->callback.ctx); @@ -4235,11 +4235,11 @@ _resize_notify(void *arg) * spdk_bdev_close() could not free the descriptor since this message was * in flight, so we free it now using bdev_desc_free(). */ - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); bdev_desc_free(desc); return; } - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } int @@ -4252,7 +4252,7 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) return 0; } - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); /* bdev has open descriptors */ if (!TAILQ_EMPTY(&bdev->internal.open_descs) && @@ -4261,17 +4261,17 @@ spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) } else { bdev->blockcnt = size; TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) { - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); if (!desc->closed) { desc->refs++; spdk_thread_send_msg(desc->thread, _resize_notify, desc); } - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } ret = 0; } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return ret; } @@ -5497,11 +5497,11 @@ bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bd * the channel flag is set, so the lock here should not * be necessary. We're not in the fast path though, so * just take it anyway. 
*/ - pthread_spin_lock(&channel->bdev->internal.spinlock); + spdk_spin_lock(&channel->bdev->internal.spinlock); if (channel->bdev->internal.qos->ch == channel) { TAILQ_SWAP(&channel->bdev->internal.qos->queued, &tmp_queued, spdk_bdev_io, internal.link); } - pthread_spin_unlock(&channel->bdev->internal.spinlock); + spdk_spin_unlock(&channel->bdev->internal.spinlock); } bdev_abort_all_queued_io(&shared_resource->nomem_io, channel); @@ -5528,7 +5528,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch) assert(!TAILQ_EMPTY(&ch->queued_resets)); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev->internal.reset_in_progress == NULL) { bdev->internal.reset_in_progress = TAILQ_FIRST(&ch->queued_resets); /* @@ -5541,7 +5541,7 @@ bdev_channel_start_reset(struct spdk_bdev_channel *ch) bdev->internal.reset_in_progress->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev)); bdev_start_reset(ch); } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); } int @@ -5564,9 +5564,9 @@ spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, bdev_io->u.reset.ch_ref = NULL; bdev_io_init(bdev_io, bdev, cb_arg, cb); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); TAILQ_INSERT_TAIL(&channel->queued_resets, bdev_io, internal.link); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link); @@ -5628,9 +5628,9 @@ spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat bdev_iostat_ctx->cb_arg = cb_arg; /* Start with the statistics from previously deleted channels. */ - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); bdev_io_stat_add(bdev_iostat_ctx->stat, &bdev->internal.stat); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); /* Then iterate and add the statistics from each existing channel. 
*/ spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx, @@ -6159,12 +6159,12 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta if (status == SPDK_BDEV_IO_STATUS_NOMEM) { SPDK_ERRLOG("NOMEM returned for reset\n"); } - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev_io == bdev->internal.reset_in_progress) { bdev->internal.reset_in_progress = NULL; unlock_channels = true; } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); if (unlock_channels) { spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io, @@ -6481,7 +6481,7 @@ bdev_register(struct spdk_bdev *bdev) free(bdev_name); - pthread_spin_init(&bdev->internal.spinlock, PTHREAD_PROCESS_PRIVATE); + spdk_spin_init(&bdev->internal.spinlock); SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name); TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link); @@ -6501,7 +6501,7 @@ bdev_destroy_cb(void *io_device) cb_fn = bdev->internal.unregister_cb; cb_arg = bdev->internal.unregister_ctx; - pthread_spin_destroy(&bdev->internal.spinlock); + spdk_spin_destroy(&bdev->internal.spinlock); free(bdev->internal.qos); rc = bdev->fn_table->destruct(bdev->ctxt); @@ -6526,11 +6526,11 @@ _remove_notify(void *arg) { struct spdk_bdev_desc *desc = arg; - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); desc->refs--; if (!desc->closed) { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); desc->callback.event_fn(SPDK_BDEV_EVENT_REMOVE, desc->bdev, desc->callback.ctx); return; } else if (0 == desc->refs) { @@ -6538,11 +6538,11 @@ _remove_notify(void *arg) * spdk_bdev_close() could not free the descriptor since this message was * in flight, so we free it now using bdev_desc_free(). */ - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); bdev_desc_free(desc); return; } - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } /* Must be called while holding g_bdev_mgr.spinlock and bdev->internal.spinlock. @@ -6558,7 +6558,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev) /* Notify each descriptor about hotremoval */ TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) { rc = -EBUSY; - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&desc->spinlock); /* * Defer invocation of the event_cb to a separate message that will * run later on its thread. This ensures this context unwinds and @@ -6567,7 +6567,7 @@ bdev_unregister_unsafe(struct spdk_bdev *bdev) */ desc->refs++; spdk_thread_send_msg(desc->thread, _remove_notify, desc); - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } /* If there are no descriptors, proceed removing the bdev */ @@ -6608,8 +6608,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status) { int rc; - pthread_spin_lock(&g_bdev_mgr.spinlock); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); /* * Set the status to REMOVING after completing to abort channels. 
Otherwise, * the last spdk_bdev_close() may call spdk_io_device_unregister() while @@ -6618,8 +6618,8 @@ bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status) */ bdev->internal.status = SPDK_BDEV_STATUS_REMOVING; rc = bdev_unregister_unsafe(bdev); - pthread_spin_unlock(&bdev->internal.spinlock); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); if (rc == 0) { spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb); @@ -6642,22 +6642,22 @@ spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void return; } - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING || bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) { - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); if (cb_fn) { cb_fn(cb_arg, -EBUSY); } return; } - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING; bdev->internal.unregister_cb = cb_fn; bdev->internal.unregister_ctx = cb_arg; - pthread_spin_unlock(&bdev->internal.spinlock); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); spdk_bdev_set_qd_sampling_period(bdev, 0); @@ -6733,30 +6733,30 @@ bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc) desc->thread = thread; desc->write = write; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING || bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return -ENODEV; } if (write && bdev->internal.claim_module) { SPDK_ERRLOG("Could not open %s - %s module already claimed it\n", bdev->name, bdev->internal.claim_module->name); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return -EPERM; } rc = bdev_start_qos(bdev); if (rc != 0) { SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return rc; } TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return 0; } @@ -6780,7 +6780,7 @@ bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *eve desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0; desc->callback.event_fn = event_cb; desc->callback.ctx = event_ctx; - pthread_spin_init(&desc->spinlock, PTHREAD_PROCESS_PRIVATE); + spdk_spin_init(&desc->spinlock); if (bdev->media_events) { desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE, @@ -6815,19 +6815,19 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event return -EINVAL; } - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); bdev = bdev_get_by_name(bdev_name); if (bdev == NULL) { SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); return -ENODEV; } rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc); if (rc != 0) { - pthread_spin_unlock(&g_bdev_mgr.spinlock); + 
spdk_spin_unlock(&g_bdev_mgr.spinlock); return rc; } @@ -6839,7 +6839,7 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event *_desc = desc; - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); return rc; } @@ -6849,18 +6849,18 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc) { int rc; - pthread_spin_lock(&bdev->internal.spinlock); - pthread_spin_lock(&desc->spinlock); + spdk_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&desc->spinlock); TAILQ_REMOVE(&bdev->internal.open_descs, desc, link); desc->closed = true; if (0 == desc->refs) { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); bdev_desc_free(desc); } else { - pthread_spin_unlock(&desc->spinlock); + spdk_spin_unlock(&desc->spinlock); } /* If no more descriptors, kill QoS channel */ @@ -6878,13 +6878,13 @@ bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc) if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) { rc = bdev_unregister_unsafe(bdev); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); if (rc == 0) { spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb); } } else { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); } } @@ -6900,11 +6900,11 @@ spdk_bdev_close(struct spdk_bdev_desc *desc) spdk_poller_unregister(&desc->io_timeout_poller); - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); bdev_close(bdev, desc); - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); } static void @@ -6996,7 +6996,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn) assert(fn != NULL); - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); bdev = spdk_bdev_first(); while (bdev != NULL) { rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc); @@ -7014,11 +7014,11 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn) } break; } - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); rc = fn(ctx, bdev); - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); tmp = spdk_bdev_next(bdev); bdev_close(bdev, desc); if (rc != 0) { @@ -7026,7 +7026,7 @@ spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn) } bdev = tmp; } - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); return rc; } @@ -7040,7 +7040,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn) assert(fn != NULL); - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); bdev = spdk_bdev_first_leaf(); while (bdev != NULL) { rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc); @@ -7058,11 +7058,11 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn) } break; } - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); rc = fn(ctx, bdev); - pthread_spin_lock(&g_bdev_mgr.spinlock); + spdk_spin_lock(&g_bdev_mgr.spinlock); tmp = spdk_bdev_next_leaf(bdev); bdev_close(bdev, desc); if (rc != 0) { @@ -7070,7 +7070,7 @@ spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn) } bdev = tmp; } - pthread_spin_unlock(&g_bdev_mgr.spinlock); + spdk_spin_unlock(&g_bdev_mgr.spinlock); return rc; } @@ -7231,9 +7231,9 @@ bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb static void bdev_set_qos_limit_done(struct 
set_qos_limit_ctx *ctx, int status) { - pthread_spin_lock(&ctx->bdev->internal.spinlock); + spdk_spin_lock(&ctx->bdev->internal.spinlock); ctx->bdev->internal.qos_mod_in_progress = false; - pthread_spin_unlock(&ctx->bdev->internal.spinlock); + spdk_spin_unlock(&ctx->bdev->internal.spinlock); if (ctx->cb_fn) { ctx->cb_fn(ctx->cb_arg, status); @@ -7249,10 +7249,10 @@ bdev_disable_qos_done(void *cb_arg) struct spdk_bdev_io *bdev_io; struct spdk_bdev_qos *qos; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); qos = bdev->internal.qos; bdev->internal.qos = NULL; - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); while (!TAILQ_EMPTY(&qos->queued)) { /* Send queued I/O back to their original thread for resubmission. */ @@ -7288,9 +7288,9 @@ bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status) struct set_qos_limit_ctx *ctx = _ctx; struct spdk_thread *thread; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); thread = bdev->internal.qos->thread; - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); if (thread != NULL) { spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx); @@ -7316,9 +7316,9 @@ bdev_update_qos_rate_limit_msg(void *cb_arg) struct set_qos_limit_ctx *ctx = cb_arg; struct spdk_bdev *bdev = ctx->bdev; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); bdev_set_qos_limit_done(ctx, 0); } @@ -7329,9 +7329,9 @@ bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev, { struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); bdev_enable_qos(bdev, bdev_ch); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); spdk_bdev_for_each_channel_continue(i, 0); } @@ -7408,9 +7408,9 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits, ctx->cb_arg = cb_arg; ctx->bdev = bdev; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev->internal.qos_mod_in_progress) { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); free(ctx); cb_fn(cb_arg, -EAGAIN); return; @@ -7433,7 +7433,7 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits, if (bdev->internal.qos == NULL) { bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos)); if (!bdev->internal.qos) { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n"); bdev_set_qos_limit_done(ctx, -ENOMEM); return; @@ -7461,13 +7461,13 @@ spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits, spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx, bdev_disable_qos_msg_done); } else { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); bdev_set_qos_limit_done(ctx, 0); return; } } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); } struct spdk_bdev_histogram_ctx { @@ -7482,9 +7482,9 @@ bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status { struct spdk_bdev_histogram_ctx *ctx = 
_ctx; - pthread_spin_lock(&ctx->bdev->internal.spinlock); + spdk_spin_lock(&ctx->bdev->internal.spinlock); ctx->bdev->internal.histogram_in_progress = false; - pthread_spin_unlock(&ctx->bdev->internal.spinlock); + spdk_spin_unlock(&ctx->bdev->internal.spinlock); ctx->cb_fn(ctx->cb_arg, ctx->status); free(ctx); } @@ -7513,9 +7513,9 @@ bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status) spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx, bdev_histogram_disable_channel_cb); } else { - pthread_spin_lock(&ctx->bdev->internal.spinlock); + spdk_spin_lock(&ctx->bdev->internal.spinlock); ctx->bdev->internal.histogram_in_progress = false; - pthread_spin_unlock(&ctx->bdev->internal.spinlock); + spdk_spin_unlock(&ctx->bdev->internal.spinlock); ctx->cb_fn(ctx->cb_arg, ctx->status); free(ctx); } @@ -7555,16 +7555,16 @@ spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb ctx->cb_fn = cb_fn; ctx->cb_arg = cb_arg; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev->internal.histogram_in_progress) { - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); free(ctx); cb_fn(cb_arg, -EAGAIN); return; } bdev->internal.histogram_in_progress = true; - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); bdev->internal.histogram_enabled = enable; @@ -7682,7 +7682,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media assert(bdev->media_events); - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) { if (desc->write) { break; @@ -7707,7 +7707,7 @@ spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media rc = event_id; out: - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return rc; } @@ -7716,14 +7716,14 @@ spdk_bdev_notify_media_management(struct spdk_bdev *bdev) { struct spdk_bdev_desc *desc; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) { if (!TAILQ_EMPTY(&desc->pending_media_events)) { desc->callback.event_fn(SPDK_BDEV_EVENT_MEDIA_MANAGEMENT, bdev, desc->callback.ctx); } } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); } struct locked_lba_range_ctx { @@ -7901,7 +7901,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch, ctx->cb_fn = cb_fn; ctx->cb_arg = cb_arg; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) { /* There is an active lock overlapping with this range. 
* Put it on the pending list until this range no @@ -7912,7 +7912,7 @@ bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch, TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq); bdev_lock_lba_range_ctx(bdev, ctx); } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return 0; } @@ -7931,7 +7931,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status) struct locked_lba_range_ctx *pending_ctx; struct lba_range *range, *tmp; - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); /* Check if there are any pending locked ranges that overlap with this range * that was just unlocked. If there are, check that it doesn't overlap with any * other locked ranges before calling bdev_lock_lba_range_ctx which will start @@ -7947,7 +7947,7 @@ bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status) bdev_lock_lba_range_ctx_msg, pending_ctx); } } - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); ctx->cb_fn(ctx->cb_arg, status); free(ctx); @@ -8024,7 +8024,7 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch, return -EINVAL; } - pthread_spin_lock(&bdev->internal.spinlock); + spdk_spin_lock(&bdev->internal.spinlock); /* We confirmed that this channel has locked the specified range. To * start the unlock the process, we find the range in the bdev's locked_ranges * and remove it. This ensures new channels don't inherit the locked range. @@ -8039,12 +8039,12 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch, } if (range == NULL) { assert(false); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); return -EINVAL; } TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq); ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range); - pthread_spin_unlock(&bdev->internal.spinlock); + spdk_spin_unlock(&bdev->internal.spinlock); ctx->cb_fn = cb_fn; ctx->cb_arg = cb_arg; diff --git a/test/unit/lib/bdev/bdev.c/bdev_ut.c b/test/unit/lib/bdev/bdev.c/bdev_ut.c index 02b305e0a..b111c3d6f 100644 --- a/test/unit/lib/bdev/bdev.c/bdev_ut.c +++ b/test/unit/lib/bdev/bdev.c/bdev_ut.c @@ -921,7 +921,7 @@ io_valid_test(void) memset(&bdev, 0, sizeof(bdev)); bdev.blocklen = 512; - CU_ASSERT(pthread_spin_init(&bdev.internal.spinlock, PTHREAD_PROCESS_PRIVATE) == 0); + spdk_spin_init(&bdev.internal.spinlock); spdk_bdev_notify_blockcnt_change(&bdev, 100); @@ -940,7 +940,7 @@ io_valid_test(void) /* Offset near end of uint64_t range (2^64 - 1) */ CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); - CU_ASSERT(pthread_spin_destroy(&bdev.internal.spinlock) == 0); + spdk_spin_destroy(&bdev.internal.spinlock); } static void
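
For reference, every conversion in this patch follows the same struct spdk_spinlock
pattern: spdk_spin_init() replaces pthread_spin_init() (it takes no process-shared
flag and returns no error code), spdk_spin_lock()/spdk_spin_unlock() wrap the
critical section, and spdk_spin_destroy() is called at teardown. The sketch below is
illustrative only: the my_counter structure and functions are hypothetical and not
part of this patch. It assumes the spdk_spin_*() API declared in
include/spdk/thread.h together with the behavior described in the CHANGELOG entry
above (callers must be on an SPDK thread, and no lock may be held when the thread
goes off CPU).

/*
 * Illustrative sketch only -- not part of this patch. Assumes the
 * spdk_spin_*() API from include/spdk/thread.h, as used above.
 */
#include "spdk/stdinc.h"
#include "spdk/thread.h"

/* Hypothetical object protected by an SPDK spinlock. */
struct my_counter {
	struct spdk_spinlock	lock;
	uint64_t		value;
};

static void
my_counter_init(struct my_counter *c)
{
	/* Unlike pthread_spin_init(), no pshared flag and no return code. */
	spdk_spin_init(&c->lock);
	c->value = 0;
}

static void
my_counter_bump(struct my_counter *c)
{
	/*
	 * Must run on an SPDK thread. Keep the critical section short and
	 * do not go off CPU (e.g. wait for a message) while holding it.
	 */
	spdk_spin_lock(&c->lock);
	c->value++;
	spdk_spin_unlock(&c->lock);
}

static void
my_counter_fini(struct my_counter *c)
{
	spdk_spin_destroy(&c->lock);
}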