lib/event: remove pending_threads_count from core_info

This field was only used to keep track of the number of threads that will be
present on a core after scheduler moves. It was used only internally within
scheduler_dynamic.

The event framework has no need to keep such a field in core_info. Instead, an
equivalent field was added to core_stats, which is internal to scheduler_dynamic.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I3ce74d4a25eac81e58da8705a1c4553730fc1e57
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8049
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
commit 5e4fbe7364
parent 4324834113
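For quick reference, below is a minimal sketch of the data-structure change the
message describes. Member names are taken from the diff that follows; anything
not shown is elided and unchanged by this commit, and the comments are
illustrative rather than taken from the source.

/* Private to the dynamic scheduler (scheduler_dynamic): per-core accounting
 * used while balancing, including the number of threads expected on the core
 * once the pending moves are applied. */
struct core_stats {
	uint64_t busy;
	uint64_t idle;
	uint32_t thread_count;
};

/* Scheduler interface exposed by the event framework: pending_threads_count
 * is removed; only the fields the framework itself needs remain
 * (other members elided). */
struct spdk_scheduler_core_info {
	/* ... */
	uint32_t lcore;
	uint32_t threads_count;
	bool interrupt_mode;
	struct spdk_lw_thread **threads;
};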
@@ -268,7 +268,6 @@ struct spdk_scheduler_core_info {
 	uint32_t lcore;
 	uint32_t threads_count;
-	uint32_t pending_threads_count;
 	bool interrupt_mode;
 	struct spdk_lw_thread **threads;
 };
 
@@ -47,6 +47,7 @@ static bool g_core_mngmnt_available;
 struct core_stats {
 	uint64_t busy;
 	uint64_t idle;
+	uint32_t thread_count;
 };
 
 static struct core_stats *g_cores;
@@ -151,7 +152,7 @@ balance(struct spdk_scheduler_core_info *cores_info, int cores_count,
 	bool busy_threads_present = false;
 
 	SPDK_ENV_FOREACH_CORE(i) {
-		cores_info[i].pending_threads_count = cores_info[i].threads_count;
+		g_cores[i].thread_count = cores_info[i].threads_count;
 		g_cores[i].busy = cores_info[i].current_busy_tsc;
 		g_cores[i].idle = cores_info[i].current_idle_tsc;
 	}
@@ -182,8 +183,9 @@ balance(struct spdk_scheduler_core_info *cores_info, int cores_count,
 
 			if (spdk_cpuset_get_cpu(cpumask, target_lcore)) {
 				lw_thread->lcore = target_lcore;
-				cores_info[target_lcore].pending_threads_count++;
-				core->pending_threads_count--;
+				g_cores[target_lcore].thread_count++;
+				assert(g_cores[i].thread_count > 0);
+				g_cores[i].thread_count--;
 
 				if (target_lcore != g_main_lcore) {
 					busy_threads_present = true;
@@ -197,9 +199,10 @@ balance(struct spdk_scheduler_core_info *cores_info, int cores_count,
 		} else if (i != g_main_lcore && load < SCHEDULER_LOAD_LIMIT) {
 			/* This thread is idle but not on the main core, so we need to move it to the main core */
 			lw_thread->lcore = g_main_lcore;
-			cores_info[g_main_lcore].pending_threads_count++;
-			core->pending_threads_count--;
+			assert(g_cores[i].thread_count > 0);
+			g_cores[i].thread_count--;
 
+			main_core->thread_count++;
 			main_core->busy += spdk_min(UINT64_MAX - main_core->busy, thread_busy);
 			main_core->idle -= spdk_min(main_core->idle, thread_busy);
 		} else {
@@ -211,8 +214,9 @@ balance(struct spdk_scheduler_core_info *cores_info, int cores_count,
 
 			if (spdk_cpuset_get_cpu(cpumask, target_lcore)) {
 				lw_thread->lcore = target_lcore;
-				cores_info[target_lcore].pending_threads_count++;
-				core->pending_threads_count--;
+				g_cores[target_lcore].thread_count++;
+				assert(g_cores[i].thread_count > 0);
+				g_cores[i].thread_count--;
 
 				if (target_lcore == g_main_lcore) {
 					main_core->busy += spdk_min(UINT64_MAX - main_core->busy, thread_busy);
@@ -235,9 +239,9 @@ balance(struct spdk_scheduler_core_info *cores_info, int cores_count,
 		reactor = spdk_reactor_get(i);
 		core = &cores_info[i];
 		/* We can switch mode only if reactor already does not have any threads */
-		if (core->pending_threads_count == 0 && TAILQ_EMPTY(&reactor->threads)) {
+		if (g_cores[i].thread_count == 0 && TAILQ_EMPTY(&reactor->threads)) {
 			core->interrupt_mode = true;
-		} else if (core->pending_threads_count != 0) {
+		} else if (g_cores[i].thread_count != 0) {
 			core->interrupt_mode = false;
 		}
 	}