sched/tune: Introducing a new schedtune attribute prefer_idle
Adds a hint that enables biasing of task placement towards idle CPUs, even when a given task is negatively boosted. The mechanism allows up to a 20% reduction in camera power without hurting performance. bug: 28312446 Change-Id: I97ea5671aa1e6bcb165408b41e17bc82e41c2c9e
This commit is contained in:
committed by
John Stultz
parent
d4cda03828
commit
c5a00c2dad
@@ -5589,7 +5589,7 @@ done:
|
||||
return target;
|
||||
}
|
||||
|
||||
static inline int find_best_target(struct task_struct *p, bool boosted)
|
||||
static inline int find_best_target(struct task_struct *p, bool prefer_idle)
|
||||
{
|
||||
int iter_cpu;
|
||||
int target_cpu = -1;
|
||||
@@ -5607,9 +5607,9 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
|
||||
int idle_idx;
|
||||
|
||||
/*
|
||||
* favor higher cpus for boosted tasks
|
||||
* favor higher cpus for tasks that prefer idle cores
|
||||
*/
|
||||
int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
|
||||
int i = prefer_idle ? NR_CPUS-iter_cpu-1 : iter_cpu;
|
||||
|
||||
if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
|
||||
continue;
|
||||
@@ -5634,10 +5634,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
|
||||
continue;
|
||||
#endif
|
||||
/*
|
||||
* For boosted tasks we favor idle cpus unconditionally to
|
||||
* Unconditionally favoring tasks that prefer idle cpus to
|
||||
* improve latency.
|
||||
*/
|
||||
if (idle_cpu(i) && boosted) {
|
||||
if (idle_cpu(i) && prefer_idle) {
|
||||
if (best_idle_cpu < 0)
|
||||
best_idle_cpu = i;
|
||||
continue;
|
||||
@@ -5654,7 +5654,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
|
||||
target_cpu = i;
|
||||
target_util = new_util;
|
||||
}
|
||||
} else if (!boosted) {
|
||||
} else if (!prefer_idle) {
|
||||
if (best_idle_cpu < 0 ||
|
||||
(sysctl_sched_cstate_aware &&
|
||||
best_idle_cstate > idle_idx)) {
|
||||
@@ -5669,7 +5669,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted)
|
||||
}
|
||||
}
|
||||
|
||||
if (boosted && best_idle_cpu >= 0)
|
||||
if (prefer_idle && best_idle_cpu >= 0)
|
||||
target_cpu = best_idle_cpu;
|
||||
else if (target_cpu < 0)
|
||||
target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
|
||||
@@ -5761,14 +5761,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
|
||||
*/
|
||||
#ifdef CONFIG_CGROUP_SCHEDTUNE
|
||||
bool boosted = schedtune_task_boost(p) > 0;
|
||||
bool prefer_idle = schedtune_prefer_idle(p) > 0;
|
||||
#else
|
||||
bool boosted = 0;
|
||||
bool prefer_idle = 0;
|
||||
#endif
|
||||
int tmp_target = find_best_target(p, boosted);
|
||||
if (tmp_target >= 0)
|
||||
int tmp_target = find_best_target(p, boosted || prefer_idle);
|
||||
if (tmp_target >= 0) {
|
||||
target_cpu = tmp_target;
|
||||
if (boosted && idle_cpu(target_cpu))
|
||||
if ((boosted || prefer_idle) && idle_cpu(target_cpu))
|
||||
return target_cpu;
|
||||
}
|
||||
}
|
||||
|
||||
if (target_cpu != task_cpu(p)) {
|
||||
|
||||
@@ -125,6 +125,10 @@ struct schedtune {
|
||||
|
||||
/* Performance Constraint (C) region threshold params */
|
||||
int perf_constrain_idx;
|
||||
|
||||
/* Hint to bias scheduling of tasks on that SchedTune CGroup
|
||||
* towards idle CPUs */
|
||||
int prefer_idle;
|
||||
};
|
||||
|
||||
static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
|
||||
@@ -156,6 +160,7 @@ root_schedtune = {
|
||||
.boost = 0,
|
||||
.perf_boost_idx = 0,
|
||||
.perf_constrain_idx = 0,
|
||||
.prefer_idle = 0,
|
||||
};
|
||||
|
||||
int
|
||||
@@ -536,6 +541,38 @@ int schedtune_task_boost(struct task_struct *p)
|
||||
return task_boost;
|
||||
}
|
||||
|
||||
int schedtune_prefer_idle(struct task_struct *p)
|
||||
{
|
||||
struct schedtune *st;
|
||||
int prefer_idle;
|
||||
|
||||
/* Get prefer_idle value */
|
||||
rcu_read_lock();
|
||||
st = task_schedtune(p);
|
||||
prefer_idle = st->prefer_idle;
|
||||
rcu_read_unlock();
|
||||
|
||||
return prefer_idle;
|
||||
}
|
||||
|
||||
static u64
|
||||
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
{
|
||||
struct schedtune *st = css_st(css);
|
||||
|
||||
return st->prefer_idle;
|
||||
}
|
||||
|
||||
static int
|
||||
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
|
||||
u64 prefer_idle)
|
||||
{
|
||||
struct schedtune *st = css_st(css);
|
||||
st->prefer_idle = prefer_idle;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static s64
|
||||
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
|
||||
{
|
||||
@@ -587,6 +624,11 @@ static struct cftype files[] = {
|
||||
.read_s64 = boost_read,
|
||||
.write_s64 = boost_write,
|
||||
},
|
||||
{
|
||||
.name = "prefer_idle",
|
||||
.read_u64 = prefer_idle_read,
|
||||
.write_u64 = prefer_idle_write,
|
||||
},
|
||||
{ } /* terminate */
|
||||
};
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@ struct target_nrg {
|
||||
int schedtune_cpu_boost(int cpu);
|
||||
int schedtune_task_boost(struct task_struct *tsk);
|
||||
|
||||
int schedtune_prefer_idle(struct task_struct *tsk);
|
||||
|
||||
void schedtune_exit_task(struct task_struct *tsk);
|
||||
|
||||
void schedtune_enqueue_task(struct task_struct *p, int cpu);
|
||||
|
||||
Reference in New Issue
Block a user