From 9bea56c004db784e6fc4fefafea3f270aac1d6b6 Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Wed, 20 May 2020 06:30:54 +0900
Subject: [PATCH] example/nvmf: Enlarge critical section guarded by g_mutex in
 nvmf_schedule_spdk_thread()

nvmf_schedule_spdk_thread() is not particularly performance critical, but it
should work as designed. Guard the whole operation that decides the target
core with g_mutex.

This matches the reactor's _reactor_schedule_thread(). If
_reactor_schedule_thread() is refined, change this accordingly too.

Signed-off-by: Shuhei Matsumoto
Change-Id: I9382230577442897d4d2d22b85b1ae4edd77aa98
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2536
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Aleksey Marchuk
Reviewed-by: Jim Harris
---
 examples/nvmf/nvmf/nvmf.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/nvmf/nvmf/nvmf.c b/examples/nvmf/nvmf/nvmf.c
index 4cb29af9a..03a568a08 100644
--- a/examples/nvmf/nvmf/nvmf.c
+++ b/examples/nvmf/nvmf/nvmf.c
@@ -237,14 +237,13 @@ nvmf_schedule_spdk_thread(struct spdk_thread *thread)
 	 * Here we use the mutex.The way the actual SPDK event framework
 	 * solves this is by using internal rings for messages between reactors
 	 */
+	pthread_mutex_lock(&g_mutex);
 	for (i = 0; i < spdk_env_get_core_count(); i++) {
-		pthread_mutex_lock(&g_mutex);
 		if (g_next_reactor == NULL) {
 			g_next_reactor = TAILQ_FIRST(&g_reactors);
 		}
 		nvmf_reactor = g_next_reactor;
 		g_next_reactor = TAILQ_NEXT(g_next_reactor, link);
-		pthread_mutex_unlock(&g_mutex);
 
 		/* each spdk_thread has the core affinity */
 		if (spdk_cpuset_get_cpu(cpumask, nvmf_reactor->core)) {
@@ -254,6 +253,7 @@ nvmf_schedule_spdk_thread(struct spdk_thread *thread)
 			break;
 		}
 	}
+	pthread_mutex_unlock(&g_mutex);
 
 	if (i == spdk_env_get_core_count()) {
 		fprintf(stderr, "failed to schedule spdk thread\n");
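
For readers who want the end state at a glance, below is a minimal sketch of
nvmf_schedule_spdk_thread() as it reads after this patch, reconstructed from
the diff context above. The local declarations, the spdk_thread_get_cpumask()
call, the placeholder comment for the reactor hand-off, and the return values
are assumptions for illustration, not lines copied from the repository.

	static int
	nvmf_schedule_spdk_thread(struct spdk_thread *thread)
	{
		struct nvmf_reactor *nvmf_reactor;
		struct spdk_cpuset *cpumask;
		uint32_t i;

		cpumask = spdk_thread_get_cpumask(thread);

		/* The whole round-robin walk over the reactors is now one
		 * critical section, so concurrent callers cannot interleave
		 * their reads and updates of g_next_reactor.
		 */
		pthread_mutex_lock(&g_mutex);
		for (i = 0; i < spdk_env_get_core_count(); i++) {
			if (g_next_reactor == NULL) {
				g_next_reactor = TAILQ_FIRST(&g_reactors);
			}
			nvmf_reactor = g_next_reactor;
			g_next_reactor = TAILQ_NEXT(g_next_reactor, link);

			/* each spdk_thread has the core affinity */
			if (spdk_cpuset_get_cpu(cpumask, nvmf_reactor->core)) {
				/* hand the spdk_thread to this reactor; this
				 * part lies outside the hunks shown above */
				break;
			}
		}
		pthread_mutex_unlock(&g_mutex);

		if (i == spdk_env_get_core_count()) {
			fprintf(stderr, "failed to schedule spdk thread\n");
			return -1;
		}

		return 0;
	}

The effect of the patch is that picking a reactor and advancing g_next_reactor
are atomic with respect to other callers, at the cost of serializing a path the
commit message notes is not performance critical.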