sched/core: Introduce sched_asym_cpucap_active()

[ Upstream commit 740cf8a760b73e8375bfb4bedcbe9746183350f9 ]

Create an inline helper for conditional code that should only be
executed on asymmetric CPU capacity systems. This makes these
(currently ~10, plus future) conditions much more readable.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20220729111305.1275158-2-dietmar.eggemann@arm.com
Stable-dep-of: a2e7f03ed28f ("sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Author:     Dietmar Eggemann <dietmar.eggemann@arm.com>
AuthorDate: 2022-07-29 13:13:03 +02:00
Committer:  Greg Kroah-Hartman
Parent:     41c2dba388
Commit:     23cb580e0c

 5 files changed, 14 insertions(+), 9 deletions(-)
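For context, a minimal userspace analogue of the refactor (illustrative
only; a plain bool stands in for the kernel's sched_asym_cpucapacity
static key, whose static_branch_unlikely() test compiles to a patched
jump rather than a load-and-branch):

/*
 * Illustrative userspace sketch, not kernel code: sched_asym_cpucapacity
 * is modeled as a plain bool; in the kernel it is a static key tested
 * via static_branch_unlikely().
 */
#include <stdbool.h>
#include <stdio.h>

static bool sched_asym_cpucapacity;	/* stand-in for the static key */

/* The helper introduced by this patch, minus the static-key machinery. */
static inline bool sched_asym_cpucap_active(void)
{
	return sched_asym_cpucapacity;
}

/* Shape of a typical call site after the conversion. */
static bool task_fits_demo(unsigned long task_util, unsigned long cpu_cap)
{
	/* Only heterogeneous systems can benefit from this check. */
	if (!sched_asym_cpucap_active())
		return true;

	return task_util <= cpu_cap;
}

int main(void)
{
	sched_asym_cpucapacity = true;	/* pretend we booted on big.LITTLE */
	printf("fits: %d\n", task_fits_demo(300, 512));
	return 0;
}

Centralizing the test in one __always_inline helper keeps the static-key
plumbing out of call sites without adding any runtime overhead.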

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c

@@ -124,7 +124,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 		unsigned long cap, max_cap = 0;
 		int cpu, max_cpu = -1;
 
-		if (!static_branch_unlikely(&sched_asym_cpucapacity))
+		if (!sched_asym_cpucap_active())
 			return 1;
 
 		/* Ensure the capacity of the CPUs fits the task. */

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c

@@ -112,7 +112,7 @@ static inline unsigned long __dl_bw_capacity(int i)
  */
 static inline unsigned long dl_bw_capacity(int i)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
+	if (!sched_asym_cpucap_active() &&
 	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 	} else {
@@ -1703,7 +1703,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 		 * Take the capacity of the CPU into account to
 		 * ensure it fits the requirement of the task.
 		 */
-		if (static_branch_unlikely(&sched_asym_cpucapacity))
+		if (sched_asym_cpucap_active())
 			select_rq |= !dl_task_fits_capacity(p, cpu);
 
 		if (select_rq) {

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -4253,7 +4253,7 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return;
 
 	if (!p || p->nr_cpus_allowed == 1) {
@@ -6528,7 +6528,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 
 static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 {
-	if (static_branch_unlikely(&sched_asym_cpucapacity))
+	if (sched_asym_cpucap_active())
 		return fits_capacity(task_util, capacity_of(cpu));
 
 	return true;
@@ -6548,7 +6548,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * On asymmetric system, update task utilization because we will check
 	 * that the task fits with cpu's capacity.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sync_entity_load_avg(&p->se);
 		task_util = uclamp_task_util(p);
 	}
@@ -6602,7 +6602,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * For asymmetric CPU capacity systems, our domain of interest is
 	 * sd_asym_cpucapacity rather than sd_llc.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
 		/*
 		 * On an asymmetric CPU capacity system where an exclusive

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c

@@ -473,7 +473,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 	unsigned int cpu_cap;
 
 	/* Only heterogeneous systems can benefit from this check */
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	if (!sched_asym_cpucap_active())
 		return true;
 
 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
@@ -1736,7 +1736,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * If we're on asym system ensure we consider the different capacities
	 * of the CPUs when searching for the lowest_mask.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_asym_cpucap_active()) {
 
 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
 					  task, lowest_mask,

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -1788,6 +1788,11 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;
 
+static __always_inline bool sched_asym_cpucap_active(void)
+{
+	return static_branch_unlikely(&sched_asym_cpucapacity);
+}
+
 struct sched_group_capacity {
 	atomic_t ref;
 	/*
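
With the helper in kernel/sched/sched.h, future asymmetric-capacity
conditions can be written against it rather than against the raw static
key. A hypothetical new call site (the function name is made up for
illustration and is not part of the patch):

/* Hypothetical future caller; my_asym_feature_update() is illustrative. */
static void my_asym_feature_update(struct rq *rq)
{
	/* Nothing to do on symmetric CPU capacity systems. */
	if (!sched_asym_cpucap_active())
		return;

	/* ...asymmetric-capacity-specific work here... */
}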