Revert "sched/fair: Introduce SIS_UTIL to search idle CPU based on sum of util_avg"
This reverts commit 079651c6cf which is
commit 70fb5ccf2ebb09a0c8ebba775041567812d45f86 upstream.
It breaks the Android GKI kernel abi, and is not needed for Android
devices, so revert it for now. If it is needed for this branch, it can
come back later in an ABI-stable way.
Bug: 161946584
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie4a8739747859bfd57e48a2745b7fcf90233fd6c
This commit is contained in:
@@ -76,7 +76,6 @@ struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
-	int		nr_idle_scan;

 	ANDROID_VENDOR_DATA(1);
 };
@@ -6328,7 +6328,6 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
-	struct sched_domain_shared *sd_share;
 	struct rq *this_rq = this_rq();
 	int this = smp_processor_id();
 	struct sched_domain *this_sd;
@@ -6368,17 +6367,6 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 		time = cpu_clock(this);
 	}

-	if (sched_feat(SIS_UTIL)) {
-		sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
-		if (sd_share) {
-			/* because !--nr is the condition to stop scan */
-			nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
-			/* overloaded LLC is unlikely to have idle cpu/core */
-			if (nr == 1)
-				return -1;
-		}
-	}
-
 	for_each_cpu_wrap(cpu, cpus, target + 1) {
 		if (has_idle_core) {
 			i = select_idle_core(p, cpu, cpus, &idle_cpu);
@@ -9327,77 +9315,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	return idlest;
 }

-static void update_idle_cpu_scan(struct lb_env *env,
-				 unsigned long sum_util)
-{
-	struct sched_domain_shared *sd_share;
-	int llc_weight, pct;
-	u64 x, y, tmp;
-	/*
-	 * Update the number of CPUs to scan in LLC domain, which could
-	 * be used as a hint in select_idle_cpu(). The update of sd_share
-	 * could be expensive because it is within a shared cache line.
-	 * So the write of this hint only occurs during periodic load
-	 * balancing, rather than CPU_NEWLY_IDLE, because the latter
-	 * can fire way more frequently than the former.
-	 */
-	if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
-		return;
-
-	llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
-	if (env->sd->span_weight != llc_weight)
-		return;
-
-	sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
-	if (!sd_share)
-		return;
-
-	/*
-	 * The number of CPUs to search drops as sum_util increases, when
-	 * sum_util hits 85% or above, the scan stops.
-	 * The reason to choose 85% as the threshold is because this is the
-	 * imbalance_pct(117) when a LLC sched group is overloaded.
-	 *
-	 * let y = SCHED_CAPACITY_SCALE - p * x^2                       [1]
-	 * and y'= y / SCHED_CAPACITY_SCALE
-	 *
-	 * x is the ratio of sum_util compared to the CPU capacity:
-	 * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
-	 * y' is the ratio of CPUs to be scanned in the LLC domain,
-	 * and the number of CPUs to scan is calculated by:
-	 *
-	 * nr_scan = llc_weight * y'                                    [2]
-	 *
-	 * When x hits the threshold of overloaded, AKA, when
-	 * x = 100 / pct, y drops to 0. According to [1],
-	 * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
-	 *
-	 * Scale x by SCHED_CAPACITY_SCALE:
-	 * x' = sum_util / llc_weight;                                  [3]
-	 *
-	 * and finally [1] becomes:
-	 * y = SCHED_CAPACITY_SCALE -
-	 *     x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)            [4]
-	 *
-	 */
-	/* equation [3] */
-	x = sum_util;
-	do_div(x, llc_weight);
-
-	/* equation [4] */
-	pct = env->sd->imbalance_pct;
-	tmp = x * x * pct * pct;
-	do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
-	tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
-	y = SCHED_CAPACITY_SCALE - tmp;
-
-	/* equation [2] */
-	y *= llc_weight;
-	do_div(y, SCHED_CAPACITY_SCALE);
-	if ((int)y != sd_share->nr_idle_scan)
-		WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
-}
-
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
@@ -9410,7 +9327,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
-	unsigned long sum_util = 0;
 	int sg_status = 0;

 	do {
@@ -9443,7 +9359,6 @@ next_group:
 		sds->total_load += sgs->group_load;
 		sds->total_capacity += sgs->group_capacity;

-		sum_util += sgs->group_util;
 		sg = sg->next;
 	} while (sg != env->sd->groups);

@@ -9469,8 +9384,6 @@ next_group:
 			WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
 			trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
 		}
-
-	update_idle_cpu_scan(env, sum_util);
 }

 #define NUMA_IMBALANCE_MIN	2
@@ -55,8 +55,7 @@ SCHED_FEAT(TTWU_QUEUE, true)
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
  */
-SCHED_FEAT(SIS_PROP, false)
-SCHED_FEAT(SIS_UTIL, true)
+SCHED_FEAT(SIS_PROP, true)

 /*
  * Issue a WARN when we do multiple update_rq_clock() calls
Reference in New Issue
Block a user