sched: Calculate energy consumption of sched_group

For energy-aware load-balancing decisions it is necessary to know the
energy consumption estimates of groups of cpus. This patch introduces a
basic function, sched_group_energy(), which estimates the energy
consumption of the cpus in the group and any resources shared by the
members of the group.
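
Roughly, each group visited by sched_group_energy() contributes

    energy ~ (norm_util * busy_power + (1024 - norm_util) * idle_power) >> 10

where norm_util is the group's estimated busy ratio in [0..1024] at the
capacity state selected for it, and the power numbers come from the group's
energy model data (sg->sge).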

NOTE: The function has five levels of indentation and breaks the 80
character limit. Refactoring is necessary.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
commit c1770a5213 (parent 5ec8ccabfe)
Author: Morten Rasmussen <morten.rasmussen@arm.com>
Commit: Leo Yan
Date:   2014-12-18 14:47:18 +00:00

 3 files changed, 161 insertions(+), 0 deletions(-)

kernel/sched/core.c

@@ -5994,6 +5994,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_busy);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
DEFINE_PER_CPU(struct sched_domain *, sd_ea);
DEFINE_PER_CPU(struct sched_domain *, sd_scs);

static void update_top_cache_domain(int cpu)
{
@@ -6027,6 +6028,9 @@ static void update_top_cache_domain(int cpu)
			break;
	}
	rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
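
	/*
	 * Cache the highest sched_domain containing cpus that share
	 * capacity (P-)states (SD_SHARE_CAP_STATES), for use by
	 * sched_group_energy().
	 */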
	sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
	rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
}
/*

kernel/sched/fair.c

@@ -4701,6 +4701,162 @@ static inline bool energy_aware(void)
	return sched_feat(ENERGY_AWARE);
}

/*
 * cpu_norm_util() returns the cpu util relative to a specific capacity,
 * i.e. its busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful
 * for energy calculations. Using the scale-invariant util returned by
 * cpu_util() and approximating scale-invariant util by:
 *
 *	util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
 *
 * the normalized util can be found using the specific capacity.
 *
 *	capacity = capacity_orig * curr_freq/max_freq
 *
 *	norm_util = running_time/time ~ util/capacity
 */
static unsigned long cpu_norm_util(int cpu, unsigned long capacity)
{
	int util = cpu_util(cpu);

	if (util >= capacity)
		return SCHED_CAPACITY_SCALE;

	return (util << SCHED_CAPACITY_SHIFT)/capacity;
}
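
/*
 * Worked example with made-up numbers: a cpu with capacity_orig = 1024
 * running at half its max frequency has capacity = 512. If cpu_util()
 * returns 256, then
 *
 *	norm_util = (256 << SCHED_CAPACITY_SHIFT) / 512 = 512
 *
 * i.e. the cpu is estimated to be busy ~50% of the time at its current
 * capacity.
 */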

static unsigned long group_max_util(struct sched_group *sg)
{
	int i;
	unsigned long max_util = 0;

	for_each_cpu(i, sched_group_cpus(sg))
		max_util = max(max_util, cpu_util(i));

	return max_util;
}

/*
 * group_norm_util() returns the approximated group util relative to its
 * current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use
 * in energy calculations. Since task executions may or may not overlap in
 * time in the group, the true normalized util is between
 * max(cpu_norm_util(i)) and sum(cpu_norm_util(i)) when iterating over all
 * cpus in the group, i. The latter is used as the estimate as it leads to
 * a more pessimistic energy estimate (more busy).
 */
static unsigned long group_norm_util(struct sched_group *sg, int cap_idx)
{
	int i;
	unsigned long util_sum = 0;
	unsigned long capacity = sg->sge->cap_states[cap_idx].cap;

	for_each_cpu(i, sched_group_cpus(sg))
		util_sum += cpu_norm_util(i, capacity);

	if (util_sum > SCHED_CAPACITY_SCALE)
		return SCHED_CAPACITY_SCALE;

	return util_sum;
}
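
/*
 * Illustration with made-up numbers: two cpus that are each ~40% busy at
 * the given capacity contribute util_sum = 0.8 * SCHED_CAPACITY_SCALE.
 * The true group busy ratio is 0.4 if their executions fully overlap in
 * time and 0.8 if they never overlap; the code deliberately uses the
 * pessimistic 0.8 bound.
 */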

static int find_new_capacity(struct sched_group *sg,
			     const struct sched_group_energy *sge)
{
	int idx;
	unsigned long util = group_max_util(sg);

	for (idx = 0; idx < sge->nr_cap_states; idx++) {
		if (sge->cap_states[idx].cap >= util)
			return idx;
	}

	/* No capacity state is sufficient; fall back to the highest one. */
	return sge->nr_cap_states - 1;
}
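
/*
 * Example with a hypothetical energy model whose cap_states are sorted in
 * ascending order, caps = {430, 745, 1024}: a group max util of 500 picks
 * idx 1 (cap 745), the lowest capacity state that can serve the busiest
 * cpu in the group.
 */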

/*
 * sched_group_energy(): Computes the absolute energy consumption of cpus
 * belonging to the sched_group, including resources shared only by
 * members of the group. It iterates over all cpus in the hierarchy below
 * the sched_group starting from the bottom, working its way up before
 * moving on to the next cpu, until all cpus are covered at all levels.
 * The current implementation is likely to gather the same util statistics
 * multiple times. This can probably be done in a faster but more complex
 * way.
 *
 * Note: sched_group_energy() may fail when racing with sched_domain
 * updates.
 */
static int sched_group_energy(struct sched_group *sg_top)
{
	struct sched_domain *sd;
	int cpu, total_energy = 0;
	struct cpumask visit_cpus;
	struct sched_group *sg;

	WARN_ON(!sg_top->sge);

	cpumask_copy(&visit_cpus, sched_group_cpus(sg_top));

	while (!cpumask_empty(&visit_cpus)) {
		struct sched_group *sg_shared_cap = NULL;

		cpu = cpumask_first(&visit_cpus);

		/*
		 * Is the group utilization affected by cpus outside this
		 * sched_group?
		 */
		sd = rcu_dereference(per_cpu(sd_scs, cpu));

		if (!sd)
			/*
			 * We most probably raced with hotplug; returning a
			 * wrong energy estimation is better than entering an
			 * infinite loop.
			 */
			return -EINVAL;

		if (sd->parent)
			sg_shared_cap = sd->parent->groups;

		for_each_domain(cpu, sd) {
			sg = sd->groups;

			/* Has this sched_domain already been visited? */
			if (sd->child && group_first_cpu(sg) != cpu)
				break;

			do {
				struct sched_group *sg_cap_util;
				unsigned long group_util;
				int sg_busy_energy, sg_idle_energy, cap_idx;
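
				/*
				 * Pick the capacity (P-)state index from the
				 * widest group sharing capacity states: cpus
				 * outside sg but inside the same frequency
				 * domain also influence which state is used.
				 */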
				if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
					sg_cap_util = sg_shared_cap;
				else
					sg_cap_util = sg;

				cap_idx = find_new_capacity(sg_cap_util, sg->sge);
				group_util = group_norm_util(sg, cap_idx);
				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
								>> SCHED_CAPACITY_SHIFT;
				sg_idle_energy = ((SCHED_LOAD_SCALE-group_util) * sg->sge->idle_states[0].power)
								>> SCHED_CAPACITY_SHIFT;

				total_energy += sg_busy_energy + sg_idle_energy;

				if (!sd->child)
					cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));

				if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(sg_top)))
					goto next_cpu;

			} while (sg = sg->next, sg != sd->groups);
		}
next_cpu:
		continue;
	}

	return total_energy;
}
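
/*
 * Example of the per-group arithmetic above, with made-up energy model
 * numbers: group_util = 512, busy power = 400 and idle power = 50 at the
 * selected capacity state give
 *
 *	sg_busy_energy = (512 * 400) >> SCHED_CAPACITY_SHIFT = 200
 *	sg_idle_energy = ((1024 - 512) * 50) >> SCHED_CAPACITY_SHIFT = 25
 *
 * i.e. the group contributes 225 units to total_energy.
 */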

/*
 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 * A waker of many should wake a different task than the one last awakened

kernel/sched/sched.h

@@ -840,6 +840,7 @@ DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_ea);
DECLARE_PER_CPU(struct sched_domain *, sd_scs);

struct sched_group_capacity {
	atomic_t ref;