sched/fair: add boosted task utilization

The task utilization signal, which is derived from PELT signals and
properly scaled to be architecture- and frequency-invariant, is used by
EAS (the energy-aware scheduler) as an estimate of the task's CPU
bandwidth requirements.

When the energy-aware scheduler is in use, this signal affects the CPU
selection. Thus, a convenient and minimally intrusive way to bias that
decision is to boost the task utilization signal whenever boosting is
required.

This patch introduces the new function:
  boosted_task_util(task)
which returns a boosted value for the utilization of the specified task.
The margin added to the original utilization is:
  1. computed based on the "boosting strategy" in use (a sketch of the
     computation follows below)
  2. proportional to the boost value, defined either by the sysctl
     interface, when global boosting is in use, or by the task group
     value, when per-task boosting is enabled.

The boosted signal is used by EAS
  a. transparently, via its integration into the task_fits() function
  b. explicitly, in the energy-aware wakeup path
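
For reference, the "boosting strategy" in item 1 above is the Signal
Proportional Compensation (SPC) strategy introduced earlier in this
series: the margin is the boost percentage of the headroom left between
the current utilization and the maximum capacity. A minimal sketch of
that computation (the exact in-kernel arithmetic, e.g. how the division
by 100 is carried out, may differ):

  /* SPC margin sketch: boost is a percentage in [0..100], util is in
   * the [0..SCHED_LOAD_SCALE] range.
   * Example: util = 512, boost = 10
   *   => margin = (1024 - 512) * 10 / 100 = 51
   */
  #define SCHED_LOAD_SCALE 1024UL

  static unsigned long spc_margin(unsigned long util, unsigned int boost)
  {
  	/* a saturated signal gets no extra margin */
  	if (util >= SCHED_LOAD_SCALE)
  		return 0;
  	return (SCHED_LOAD_SCALE - util) * boost / 100;
  }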

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
---
 3 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5048,11 +5048,13 @@ static inline unsigned long task_util(struct task_struct *p)
 
 unsigned int capacity_margin = 1280; /* ~20% margin */
 
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
 static inline bool __task_fits(struct task_struct *p, int cpu, int util)
 {
 	unsigned long capacity = capacity_of(cpu);
 
-	util += task_util(p);
+	util += boosted_task_util(p);
 
 	return (capacity * 1024) > (util * capacity_margin);
 }
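
For illustration, the check above reserves ~20% headroom: with
capacity_margin = 1280, "capacity * 1024 > util * capacity_margin" is
equivalent to util < capacity * 1024 / 1280, i.e. the (now boosted)
utilization must stay below 80% of the CPU capacity. A self-contained
sketch of the same test (names here are local to the example):

  /* With capacity = 1024 and capacity_margin = 1280:
   *   util = 819 -> 1024 * 1024 = 1048576 > 819 * 1280 = 1048320 -> fits
   *   util = 820 -> 1048576 > 820 * 1280 = 1049600 is false -> no fit
   */
  static int fits_capacity(unsigned long util, unsigned long capacity)
  {
  	return (capacity * 1024) > (util * 1280);
  }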
@@ -5133,6 +5135,27 @@ schedtune_cpu_margin(unsigned long util, int cpu)
 	return schedtune_margin(util, boost);
 }
 
+static inline unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+	unsigned int boost;
+	unsigned long util;
+	unsigned long margin;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	boost = schedtune_task_boost(task);
+#else
+	boost = get_sysctl_sched_cfs_boost();
+#endif
+	if (boost == 0)
+		return 0;
+
+	util = task_util(task);
+	margin = schedtune_margin(util, boost);
+
+	return margin;
+}
+
 #else /* CONFIG_SCHED_TUNE */
 
 static inline unsigned int
@@ -5141,6 +5164,12 @@ schedtune_cpu_margin(unsigned long util, int cpu)
 	return 0;
 }
 
+static inline unsigned int
+schedtune_task_margin(struct task_struct *task)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SCHED_TUNE */
 
 static inline unsigned long
@@ -5152,6 +5181,15 @@ boosted_cpu_util(int cpu)
 	return util + margin;
 }
 
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+	unsigned long util = task_util(task);
+	unsigned long margin = schedtune_task_margin(task);
+
+	return util + margin;
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
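
Putting the pieces together, and reusing the SPC sketch from the commit
message (values are illustrative only):

  /* task_util(p) = 512, boost = 10%:
   *   margin = (1024 - 512) * 10 / 100 = 51
   *   boosted_task_util(p) = 512 + 51 = 563
   * With boost = 0, schedtune_task_margin() returns 0 and the
   * original utilization is passed through unchanged.
   */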
@@ -5386,7 +5424,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target)
 		 * so prev_cpu will receive a negative bias due to the double
 		 * accounting. However, the blocked utilization may be zero.
 		 */
-		int new_util = cpu_util(i) + task_util(p);
+		int new_util = cpu_util(i) + boosted_task_util(p);
 
 		if (new_util > capacity_orig_of(i))
 			continue;

diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -240,6 +240,20 @@ int schedtune_cpu_boost(int cpu)
 	return bg->boost_max;
 }
 
+int schedtune_task_boost(struct task_struct *p)
+{
+	struct schedtune *st;
+	int task_boost;
+
+	/* Get task boost value */
+	rcu_read_lock();
+	st = task_schedtune(p);
+	task_boost = st->boost;
+	rcu_read_unlock();
+
+	return task_boost;
+}
+
 static u64
 boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
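
schedtune_task_boost() reads the per-task boost under rcu_read_lock()
because the task may be migrated between schedtune groups concurrently;
the group pointer returned by task_schedtune() is only guaranteed to
stay valid inside the read-side critical section. For context, a sketch
of that lookup helper as defined earlier in the series (assuming the
css-based accessors used there):

  /* sketch: resolve the schedtune group a task is attached to */
  static inline struct schedtune *task_schedtune(struct task_struct *tsk)
  {
  	return css_st(task_css(tsk, schedtune_cgrp_id));
  }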

diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -4,6 +4,7 @@
 #ifdef CONFIG_CGROUP_SCHEDTUNE
 
 int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
 
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);