commit dcc4fc8469 ("sched: Reduce the default slice to avoid tasks getting an extra tick")
The old default value for the slice is 0.75 msec * (1 + ilog(ncpus)), which
gives a default slice of:

  0.75 for 1 cpu
  1.50 up to 3 cpus
  2.25 up to 7 cpus
  3.00 for 8 cpus and above.

For HZ=250 and HZ=100, because of the coarse tick granularity, tasks run
for far longer than their slice.

For HZ=1000 with 8 cpus or more, tick accuracy is already satisfactory,
but there is still an issue: tasks will get an extra tick because the tick
often arrives a little earlier than expected. In that case, the task has to
wait until the next tick to be considered past its deadline, and runs about
1ms longer.

vruntime + sysctl_sched_base_slice =     deadline
        |-----------|-----------|-----------|-----------|
             1ms          1ms         1ms         1ms
                   ^           ^           ^           ^
                 tick1       tick2       tick3       tick4(nearly 4ms)

There are two sources of tick error: clockevent precision and
CONFIG_IRQ_TIME_ACCOUNTING/CONFIG_PARAVIRT_TIME_ACCOUNTING. With
CONFIG_IRQ_TIME_ACCOUNTING, every tick accounts for less than 1ms of task
runtime, but even without it, ticks are still often shorter than 1ms
because of clockevent precision.

In order to make scheduling more precise, we changed 0.75 to 0.70. Using
0.70 instead of 0.75 should not change much for other configs and fixes
this issue:

  0.70 for 1 cpu
  1.40 up to 3 cpus
  2.10 up to 7 cpus
  2.80 for 8 cpus and above.

This does not guarantee that tasks can run the slice time accurately
every time, but occasionally running an extra tick has little impact.
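
As a rough illustration (the exact tick error varies by system): with
HZ=1000 a tick nominally accounts for 1ms of runtime, but in practice often
slightly less, say ~0.997ms. With a 3.00ms slice, three such ticks only
cover ~2.99ms, so the task is not preempted until the fourth tick and runs
for ~4ms. With a 2.80ms slice, the same three ticks (~2.99ms) already
exceed the slice, so the task is preempted at the third tick as intended.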

Signed-off-by: zihan zhou <15645113830zzh@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20250208075322.13139-1-15645113830zzh@gmail.com
[Helium-Studio: Adapt for 8 cpus]
Signed-off-by: Helium-Studio <67852324+Helium-Studio@users.noreply.github.com>


// SPDX-License-Identifier: GPL-2.0
/*
* Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
*
* Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Interactivity improvements by Mike Galbraith
* (C) 2007 Mike Galbraith <efault@gmx.de>
*
* Various enhancements by Dmitry Adamushko.
* (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
*
* Group scheduling enhancements by Srivatsa Vaddagiri
* Copyright IBM Corporation, 2007
* Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
*
* Scaled math optimizations by Thomas Gleixner
* Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>
#include <linux/rbtree_augmented.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "tune.h"
#include "walt.h"
#ifdef CONFIG_SMP
static inline bool task_fits_max(struct task_struct *p, int cpu);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_WALT
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled,
u16 updated_pred_demand_scaled);
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
int delta, bool inc);
#endif /* CONFIG_SCHED_WALT */
#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
struct task_struct *p);
static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
struct task_struct *p);
static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
struct cfs_rq *cfs_rq);
static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
struct cfs_rq *cfs_rq);
#else
static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) {}
static inline void
walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
#define walt_inc_throttled_cfs_rq_stats(...)
#define walt_dec_throttled_cfs_rq_stats(...)
#endif
/*
* Enable/disable honoring sync flag in energy-aware wakeups.
*/
unsigned int sysctl_sched_sync_hint_enable = 1;
/*
* Enable/disable using cstate knowledge in idle sibling selection
*/
unsigned int sysctl_sched_cstate_aware = 1;
/*
* The initial- and re-scaling of tunables is configurable
*
* Options are:
*
* SCHED_TUNABLESCALING_NONE - unscaled, always *1
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
* (default SCHED_TUNABLESCALING_NONE = *1)
*/
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
/*
* Minimal preemption granularity for CPU-bound tasks:
*
* (default: 2.8 msec, units: nanoseconds)
*/
unsigned int sysctl_sched_base_slice = 2800000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 2800000ULL;
/*
* To enable/disable energy aware feature.
*/
unsigned int __read_mostly sysctl_sched_energy_aware = 1;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_use_walt_cpu_util = 1;
unsigned int sysctl_sched_use_walt_task_util = 1;
__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
(10 * NSEC_PER_MSEC);
#endif
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
int _shift = 0;
if (kstrtoint(str, 0, &_shift))
pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
sched_thermal_decay_shift = clamp(_shift, 0, 10);
return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
#ifdef CONFIG_SMP
/*
* For asym packing, by default a lower-capacity CPU is given higher priority.
*/
int __weak arch_asym_cpu_priority(int cpu)
{
return -arch_scale_cpu_capacity(cpu);
}
/*
* The margin used when comparing CPU capacities.
* is 'cap1' noticeably greater than 'cap2'
*
* (default: ~5%)
*/
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
#endif
#ifdef CONFIG_CFS_BANDWIDTH
/*
* Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
* each time a cfs_rq requests quota.
*
* Note: in the case that the slice exceeds the runtime remaining (either due
* to consumption or the quota being specified to be smaller than the slice)
* we will always only issue the remaining available time.
*
* (default: 5 msec, units: microseconds)
*/
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
/*
* The margin used when comparing utilization with CPU capacity.
*
* (default: ~20%)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
unsigned int capacity_margin = 1280;
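/*
* As a rough example of the margin above (values are hypothetical): with
* max = 1024, fits_capacity(800, 1024) is true because 800 * 1280 =
* 1024000 < 1024 * 1024 = 1048576, while fits_capacity(900, 1024) is false
* because 900 * 1280 = 1152000 >= 1048576. In other words, a utilization
* of up to ~80% of the capacity is considered to fit.
*/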
/* Migration margins */
unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
unsigned int sched_capacity_margin_up[NR_CPUS] = {
[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
unsigned int sched_capacity_margin_down[NR_CPUS] = {
[0 ... NR_CPUS-1] = 1205}; /* ~15% margin */
#ifdef CONFIG_SCHED_WALT
/* 1ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
#endif
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
lw->inv_weight = 0;
}
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
lw->inv_weight = 0;
}
static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
lw->weight = w;
lw->inv_weight = 0;
}
/*
* Increase the granularity value when there are more CPUs,
* because with more CPUs the 'effective latency' as visible
* to users decreases. But the relationship is not linear,
* so pick a second-best guess by going with the log2 of the
* number of CPUs.
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
unsigned int factor;
switch (sysctl_sched_tunable_scaling) {
case SCHED_TUNABLESCALING_NONE:
factor = 1;
break;
case SCHED_TUNABLESCALING_LINEAR:
factor = cpus;
break;
case SCHED_TUNABLESCALING_LOG:
default:
factor = 1 + ilog2(cpus);
break;
}
return factor;
}
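/*
* Worked example (hypothetical configuration): with 8 or more online CPUs,
* cpus is capped at 8, so the factor is 1 for SCHED_TUNABLESCALING_NONE,
* 8 for SCHED_TUNABLESCALING_LINEAR and 1 + ilog2(8) = 4 for
* SCHED_TUNABLESCALING_LOG. With the SCHED_TUNABLESCALING_NONE default
* above, update_sysctl() below leaves sysctl_sched_base_slice at the
* normalized 2.8 msec.
*/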
static void update_sysctl(void)
{
unsigned int factor = get_update_sysctl_factor();
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
void sched_init_granularity(void)
{
update_sysctl();
}
#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32
static void __update_inv_weight(struct load_weight *lw)
{
unsigned long w;
if (likely(lw->inv_weight))
return;
w = scale_load_down(lw->weight);
if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
lw->inv_weight = 1;
else if (unlikely(!w))
lw->inv_weight = WMULT_CONST;
else
lw->inv_weight = WMULT_CONST / w;
}
/*
* delta_exec * weight / lw.weight
* OR
* (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
*
* Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
* we're guaranteed shift stays positive because inv_weight is guaranteed to
* fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
*
* Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
* weight/lw.weight <= 1, and therefore our shift will also be positive.
*/
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
u64 fact = scale_load_down(weight);
u32 fact_hi = (u32)(fact >> 32);
int shift = WMULT_SHIFT;
int fs;
__update_inv_weight(lw);
if (unlikely(fact_hi)) {
fs = fls(fact_hi);
shift -= fs;
fact >>= fs;
}
/* hint to use a 32x32->64 mul */
fact = (u64)(u32)fact * lw->inv_weight;
fact_hi = (u32)(fact >> 32);
if (fact_hi) {
fs = fls(fact_hi);
shift -= fs;
fact >>= fs;
}
return mul_u64_u32_shr(delta_exec, fact, shift);
}
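/*
* Illustrative example with hypothetical weights: for an entity whose
* lw->weight scales down to 2048 (twice NICE_0_LOAD's 1024),
* __update_inv_weight() gives inv_weight = ~0U / 2048 ~= 2^21, so
* fact = 1024 * inv_weight ~= 2^31 and the final
* mul_u64_u32_shr(delta_exec, fact, 32) returns roughly delta_exec / 2:
* a heavier entity's vruntime advances more slowly.
*/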
/*
* delta /= w
*/
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
if (unlikely(se->load.weight != NICE_0_LOAD))
delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
return delta;
}
const struct sched_class fair_sched_class;
/**************************************************************
* CFS operations on generic schedulable entities:
*/
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
return container_of(se, struct task_struct, se);
}
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return p->se.cfs_rq;
}
/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
return se->cfs_rq;
}
/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return grp->my_q;
}
static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq);
if (cfs_rq->on_list)
return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
cfs_rq->on_list = 1;
/*
* Ensure we either appear before our parent (if already
* enqueued) or force our parent to appear after us when it is
* enqueued. The fact that we always enqueue bottom-up
* reduces this to two cases and a special case for the root
* cfs_rq. Furthermore, it also means that we will always reset
* tmp_alone_branch either when the branch is connected
* to a tree or when we reach the top of the tree
*/
if (cfs_rq->tg->parent &&
cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
/*
* If parent is already on the list, we add the child
* just before. Thanks to circular linked property of
* the list, this means to put the child at the tail
* of the list that starts by parent.
*/
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
/*
* The branch is now connected to its tree so we can
* reset tmp_alone_branch to the beginning of the
* list.
*/
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
return true;
}
if (!cfs_rq->tg->parent) {
/*
* cfs rq without parent should be put
* at the tail of the list.
*/
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
&rq->leaf_cfs_rq_list);
/*
* We have reached the top of a tree so we can reset
* tmp_alone_branch to the beginning of the list.
*/
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
return true;
}
/*
* The parent has not already been added so we want to
* make sure that it will be put after us.
* tmp_alone_branch points to the beginning of the branch
* where we will add parent.
*/
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
/*
* update tmp_alone_branch to point to the new beginning
* of the branch
*/
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
return false;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (cfs_rq->on_list) {
struct rq *rq = rq_of(cfs_rq);
/*
* With cfs_rq being unthrottled/throttled during an enqueue,
* it can happen that tmp_alone_branch points to a leaf that
* we finally want to del. In this case, tmp_alone_branch moves
* to the prev element but it will point to rq->leaf_cfs_rq_list
* at the end of the enqueue.
*/
if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
cfs_rq->on_list = 0;
}
}
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}
/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
if (se->cfs_rq == pse->cfs_rq)
return se->cfs_rq;
return NULL;
}
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return se->parent;
}
static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
int se_depth, pse_depth;
/*
* A preemption test can be made between sibling entities that are in the
* same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
* both tasks until we find their ancestors that are siblings of a common
* parent.
*/
/* First walk up until both entities are at same depth */
se_depth = (*se)->depth;
pse_depth = (*pse)->depth;
while (se_depth > pse_depth) {
se_depth--;
*se = parent_entity(*se);
}
while (pse_depth > se_depth) {
pse_depth--;
*pse = parent_entity(*pse);
}
while (!is_same_group(*se, *pse)) {
*se = parent_entity(*se);
*pse = parent_entity(*pse);
}
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline struct task_struct *task_of(struct sched_entity *se)
{
return container_of(se, struct task_struct, se);
}
#define for_each_sched_entity(se) \
for (; se; se = NULL)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return &task_rq(p)->cfs;
}
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);
return &rq->cfs;
}
/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}
static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
return true;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return NULL;
}
static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
if (delta > 0)
max_vruntime = vruntime;
return max_vruntime;
}
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - min_vruntime);
if (delta < 0)
min_vruntime = vruntime;
return min_vruntime;
}
static inline bool entity_before(struct sched_entity *a,
struct sched_entity *b)
{
/*
* Tiebreak on vruntime seems unnecessary since it can
* hardly happen.
*/
return (s64)(a->deadline - b->deadline) < 0;
}
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return (s64)(se->vruntime - cfs_rq->min_vruntime);
}
#define __node_2_se(node) \
rb_entry((node), struct sched_entity, run_node)
/*
* Compute virtual time from the per-task service numbers:
*
* Fair schedulers conserve lag:
*
* \Sum lag_i = 0
*
* Where lag_i is given by:
*
* lag_i = S - s_i = w_i * (V - v_i)
*
* Where S is the ideal service time and V is its virtual time counterpart.
* Therefore:
*
* \Sum lag_i = 0
* \Sum w_i * (V - v_i) = 0
* \Sum w_i * V - w_i * v_i = 0
*
* From which we can solve an expression for V in v_i (which we have in
* se->vruntime):
*
* \Sum v_i * w_i \Sum v_i * w_i
* V = -------------- = --------------
* \Sum w_i W
*
* Specifically, this is the weighted average of all entity virtual runtimes.
*
* [[ NOTE: this is only equal to the ideal scheduler under the condition
* that join/leave operations happen at lag_i = 0, otherwise the
* virtual time has non-contiguous motion equivalent to:
*
* V +-= lag_i / W
*
* Also see the comment in place_entity() that deals with this. ]]
*
* However, since v_i is u64 and the multiplication could easily overflow,
* transform it into a relative form that uses smaller quantities:
*
* Substitute: v_i == (v_i - v0) + v0
*
* \Sum ((v_i - v0) + v0) * w_i \Sum (v_i - v0) * w_i
* V = ---------------------------- = --------------------- + v0
* W W
*
* Which we track using:
*
* v0 := cfs_rq->min_vruntime
* \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
* \Sum w_i := cfs_rq->avg_load
*
* Since min_vruntime is a monotonic increasing variable that closely tracks
* the per-task service, these deltas: (v_i - v), will be in the order of the
* maximal (virtual) lag induced in the system due to quantisation.
*
* Also, we use scale_load_down() to reduce the size.
*
* As measured, the max (key * weight) value was ~44 bits for a kernel build.
*/
static void
avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long weight = scale_load_down(se->load.weight);
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime += key * weight;
cfs_rq->avg_load += weight;
}
static void
avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long weight = scale_load_down(se->load.weight);
s64 key = entity_key(cfs_rq, se);
cfs_rq->avg_vruntime -= key * weight;
cfs_rq->avg_load -= weight;
}
static inline
void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
{
/*
* v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
*/
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
}
/*
* Specifically: avg_vruntime() + 0 must result in entity_eligible() := true
* For this to be so, the result of this function must have a left bias.
*/
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
unsigned long weight = scale_load_down(curr->load.weight);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
}
if (load) {
/* sign flips effective floor / ceil */
if (avg < 0)
avg -= (load - 1);
avg = div_s64(avg, load);
}
return cfs_rq->min_vruntime + avg;
}
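/*
* Small worked example (hypothetical values): with min_vruntime = 1000 and
* two queued entities whose keys (v_i - v0) are +3000 and -1000 with
* scaled-down weights 1024 and 2048, avg_vruntime = 3000*1024 - 1000*2048 =
* 1024000 and avg_load = 3072, so V = 1000 + 1024000/3072 = 1000 + 333.
* When the sum is negative, the "avg -= (load - 1)" adjustment rounds the
* division towards minus infinity, preserving the left bias noted above.
*/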
/*
* lag_i = S - s_i = w_i * (V - v_i)
*
* However, since V is approximated by the weighted average of all entities it
* is possible -- by addition/removal/reweight to the tree -- to move V around
* and end up with a larger lag than we started with.
*
* Limit this to either double the slice length with a minimum of TICK_NSEC
* since that is the timing granularity.
*
* EEVDF gives the following limit for a steady state system:
*
* -r_max < lag < max(r_max, q)
*
* XXX could add max_slice to the augmented data to track this.
*/
s64 entity_lag(u64 avruntime, struct sched_entity *se)
{
s64 vlag, limit;
vlag = avruntime - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
return clamp(vlag, -limit, limit);
}
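/*
* For example (hypothetical numbers): a nice-0 entity with se->slice =
* 2.8ms and HZ=1000 (TICK_NSEC = 1ms) gets limit = max(5.6ms, 1ms) =
* 5.6ms, since calc_delta_fair() is an identity for NICE_0_LOAD. A raw
* lag of +9ms would therefore be clamped to +5.6ms.
*/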
void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
SCHED_WARN_ON(!se->on_rq);
se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
}
/*
* Entity is eligible once it received less service than it ought to have,
* eg. lag >= 0.
*
* lag_i = S - s_i = w_i*(V - v_i)
*
* lag_i >= 0 -> V >= v_i
*
* \Sum (v_i - v)*w_i
* V = ------------------ + v
* \Sum w_i
*
* lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
*
* Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
* to the loss in precision caused by the division.
*/
static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
{
struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;
if (curr && curr->on_rq) {
unsigned long weight = scale_load_down(curr->load.weight);
avg += entity_key(cfs_rq, curr) * weight;
load += weight;
}
return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
}
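/*
* The comparison above is the eligibility condition multiplied through by
* \Sum w_i to avoid the division. With, say, avg = 1024000 and load = 3072
* (hypothetical values as in the avg_vruntime() example above): an entity
* with key (v_i - v0) = 300 satisfies 300 * 3072 = 921600 <= 1024000 and
* is eligible, while one with key 400 gives 400 * 3072 = 1228800 > 1024000
* and is not.
*/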
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (!sched_feat(ENFORCE_ELIGIBILITY))
return 1;
return vruntime_eligible(cfs_rq, se->vruntime);
}
static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
{
u64 min_vruntime = cfs_rq->min_vruntime;
/*
* open coded max_vruntime() to allow updating avg_vruntime
*/
s64 delta = (s64)(vruntime - min_vruntime);
if (delta > 0) {
avg_vruntime_update(cfs_rq, delta);
min_vruntime = vruntime;
}
return min_vruntime;
}
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = __pick_root_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
u64 vruntime = cfs_rq->min_vruntime;
if (curr) {
if (curr->on_rq)
vruntime = curr->vruntime;
else
curr = NULL;
}
if (se) {
if (!curr)
vruntime = se->min_vruntime;
else
vruntime = min_vruntime(vruntime, se->min_vruntime);
}
/* ensure we never gain time by being placed backwards. */
u64_u32_store(cfs_rq->min_vruntime,
__update_min_vruntime(cfs_rq, vruntime));
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
return entity_before(__node_2_se(a), __node_2_se(b));
}
#define vruntime_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node)
{
if (node) {
struct sched_entity *rse = __node_2_se(node);
if (vruntime_gt(min_vruntime, se, rse))
se->min_vruntime = rse->min_vruntime;
}
}
/*
* se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
*/
static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
u64 old_min_vruntime = se->min_vruntime;
struct rb_node *node = &se->run_node;
se->min_vruntime = se->vruntime;
__min_vruntime_update(se, node->rb_right);
__min_vruntime_update(se, node->rb_left);
return se->min_vruntime == old_min_vruntime;
}
RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
run_node, min_vruntime, min_vruntime_update);
/*
* Enqueue an entity into the rb-tree:
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
se->min_vruntime = se->vruntime;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
__entity_less, &min_vruntime_cb);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_vruntime_cb);
avg_vruntime_sub(cfs_rq, se);
}
struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node;
if (!root)
return NULL;
return __node_2_se(root);
}
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
if (!left)
return NULL;
return __node_2_se(left);
}
/*
* Earliest Eligible Virtual Deadline First
*
* In order to provide latency guarantees for different request sizes
* EEVDF selects the best runnable task from two criteria:
*
* 1) the task must be eligible (must be owed service)
*
* 2) from those tasks that meet 1), we select the one
* with the earliest virtual deadline.
*
* We can do this in O(log n) time due to an augmented RB-tree. The
* tree keeps the entries sorted on deadline, but also functions as a
* heap based on the vruntime by keeping:
*
* se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
*
* Which allows tree pruning through eligibility.
*/
static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
/*
* We can safely skip eligibility check if there is only one entity
* in this cfs_rq, saving some cycles.
*/
if (cfs_rq->nr_running == 1)
return curr && curr->on_rq ? curr : se;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
curr = NULL;
/*
* Once selected, run a task until it either becomes non-eligible or
* until it gets a new slice. See the HACK in set_next_entity().
*/
if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
return curr;
/* Pick the leftmost entity if it's eligible */
if (se && entity_eligible(cfs_rq, se)) {
best = se;
goto found;
}
/* Heap search for the EEVDF entity */
while (node) {
struct rb_node *left = node->rb_left;
/*
* Eligible entities in left subtree are always better
* choices, since they have earlier deadlines.
*/
if (left && vruntime_eligible(cfs_rq,
__node_2_se(left)->min_vruntime)) {
node = left;
continue;
}
se = __node_2_se(node);
/*
* The left subtree either is empty or has no eligible
* entity, so check the current node since it is the one
* with earliest deadline that might be eligible.
*/
if (entity_eligible(cfs_rq, se)) {
best = se;
break;
}
node = node->rb_right;
}
found:
if (!best || (curr && entity_before(curr, best)))
best = curr;
return best;
}
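/*
* Tiny worked example (hypothetical values, eligibility enforced): let the
* weighted average V = avg_vruntime() be 100 and the queued entities,
* sorted by deadline, be A(deadline 110, vruntime 120), B(130, 90) and
* C(150, 95). A has the earliest deadline but vruntime 120 > V, so it is
* not eligible and its subtree is pruned via min_vruntime; B is the first
* eligible entity found and wins, even though C is eligible too, because
* B's virtual deadline is earlier.
*/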
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
if (!last)
return NULL;
return __node_2_se(last);
}
/**************************************************************
* Scheduling class statistics methods:
*/
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
unsigned int factor = get_update_sysctl_factor();
if (ret || !write)
return ret;
#define WRT_SYSCTL(name) \
(normalized_sysctl_##name = sysctl_##name / (factor))
WRT_SYSCTL(sched_base_slice);
#undef WRT_SYSCTL
return 0;
}
#endif
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
/*
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if ((s64)(se->vruntime - se->deadline) < 0)
return;
/*
* For EEVDF the virtual time slope is determined by w_i (iow.
* nice) while the request time r_i is determined by
* sysctl_sched_base_slice.
*/
se->slice = sysctl_sched_base_slice;
/*
* EEVDF: vd_i = ve_i + r_i / w_i
*/
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
/*
* The task has consumed its request, reschedule.
*/
if (cfs_rq->nr_running > 1) {
resched_curr(rq_of(cfs_rq));
clear_buddies(cfs_rq, se);
}
}
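/*
* For example (hypothetical weights): a nice-0 entity gets
* calc_delta_fair(slice, se) == slice, so with the 2.8 msec base slice its
* next deadline is vruntime + 2.8 msec of virtual time. An entity with
* half the nice-0 weight would see roughly twice that virtual request,
* i.e. a deadline about 5.6 msec of virtual time away.
*/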
#include "pelt.h"
#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);
/* Give a new sched_entity initial runnable values so it is seen as a heavy load in its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
memset(sa, 0, sizeof(*sa));
/*
* Tasks are initialized with full load to be seen as heavy tasks until
* they get a chance to stabilize to their real load level.
* Group entities are initialized with zero load to reflect the fact that
* nothing has been attached to the task group yet.
*/
if (entity_is_task(se))
sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);
se->runnable_weight = se->load.weight;
/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
static void attach_entity_cfs_rq(struct sched_entity *se);
/*
* With new tasks being created, their initial util_avgs are extrapolated
* based on the cfs_rq's current util_avg:
*
* util_avg = cfs_rq->avg.util_avg / (cfs_rq->avg.load_avg + 1)
* * se_weight(se)
*
* However, in many cases, the above util_avg does not give a desired
* value. Moreover, the sum of the util_avgs may be divergent, such
* as when the series is a harmonic series.
*
* To solve this problem, we also cap the util_avg of successive tasks to
* only 1/2 of the left utilization budget:
*
* util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
*
* where n denotes the nth task and cpu_scale the CPU capacity.
*
* For example, for a CPU with 1024 of capacity, a simplest series from
* the beginning would be like:
*
* task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
* cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
*
* Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
* if util_avg > util_avg_cap.
*/
void post_init_entity_util_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
sa->util_avg = cfs_rq->avg.util_avg * se_weight(se);
sa->util_avg /= (cfs_rq->avg.load_avg + 1);
if (sa->util_avg > cap)
sa->util_avg = cap;
} else {
sa->util_avg = cap;
}
}
if (entity_is_task(se)) {
struct task_struct *p = task_of(se);
if (p->sched_class != &fair_sched_class) {
/*
* For !fair tasks do:
*
update_cfs_rq_load_avg(now, cfs_rq);
attach_entity_load_avg(cfs_rq, se);
switched_from_fair(rq, p);
*
* such that the next switched_to_fair() has the
* expected state.
*/
se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
return;
}
}
attach_entity_cfs_rq(se);
}
#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
}
#endif /* CONFIG_SMP */
/*
* Update the current task's runtime statistics.
*/
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
if (unlikely(!curr))
return;
delta_exec = now - curr->exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
curr->exec_start = now;
schedstat_set(curr->statistics.exec_max,
max(delta_exec, curr->statistics.exec_max));
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cpuacct_charge(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static void update_curr_fair(struct rq *rq)
{
update_curr(cfs_rq_of(&rq->curr->se));
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
u64 wait_start, prev_wait_start;
if (!schedstat_enabled())
return;
wait_start = rq_clock(rq_of(cfs_rq));
prev_wait_start = schedstat_val(se->statistics.wait_start);
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
likely(wait_start > prev_wait_start))
wait_start -= prev_wait_start;
schedstat_set(se->statistics.wait_start, wait_start);
}
static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct task_struct *p;
u64 delta;
if (!schedstat_enabled())
return;
delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
if (entity_is_task(se)) {
p = task_of(se);
if (task_on_rq_migrating(p)) {
/*
* Preserve migrating task's wait time so wait_start
* time stamp can be adjusted to accumulate wait time
* prior to migration.
*/
schedstat_set(se->statistics.wait_start, delta);
return;
}
trace_sched_stat_wait(p, delta);
}
schedstat_set(se->statistics.wait_max,
max(schedstat_val(se->statistics.wait_max), delta));
schedstat_inc(se->statistics.wait_count);
schedstat_add(se->statistics.wait_sum, delta);
schedstat_set(se->statistics.wait_start, 0);
}
static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct task_struct *tsk = NULL;
u64 sleep_start, block_start;
if (!schedstat_enabled())
return;
sleep_start = schedstat_val(se->statistics.sleep_start);
block_start = schedstat_val(se->statistics.block_start);
if (entity_is_task(se))
tsk = task_of(se);
if (sleep_start) {
u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
schedstat_set(se->statistics.sleep_max, delta);
schedstat_set(se->statistics.sleep_start, 0);
schedstat_add(se->statistics.sum_sleep_runtime, delta);
if (tsk) {
account_scheduler_latency(tsk, delta >> 10, 1);
trace_sched_stat_sleep(tsk, delta);
}
}
if (block_start) {
u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(se->statistics.block_max)))
schedstat_set(se->statistics.block_max, delta);
schedstat_set(se->statistics.block_start, 0);
schedstat_add(se->statistics.sum_sleep_runtime, delta);
if (tsk) {
if (tsk->in_iowait) {
schedstat_add(se->statistics.iowait_sum, delta);
schedstat_inc(se->statistics.iowait_count);
trace_sched_stat_iowait(tsk, delta);
}
trace_sched_stat_blocked(tsk, delta);
trace_sched_blocked_reason(tsk);
/*
* Blocking time is in units of nanosecs, so shift by
* 20 to get a milliseconds-range estimation of the
* amount of time that the task spent sleeping:
*/
if (unlikely(prof_on == SLEEP_PROFILING)) {
profile_hits(SLEEP_PROFILING,
(void *)get_wchan(tsk),
delta >> 20);
}
account_scheduler_latency(tsk, delta >> 10, 0);
}
}
}
/*
* Task is being enqueued - update stats:
*/
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
if (!schedstat_enabled())
return;
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
if (se != cfs_rq->curr)
update_stats_wait_start(cfs_rq, se);
if (flags & ENQUEUE_WAKEUP)
update_stats_enqueue_sleeper(cfs_rq, se);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
if (!schedstat_enabled())
return;
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
if (se != cfs_rq->curr)
update_stats_wait_end(cfs_rq, se);
if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_INTERRUPTIBLE)
schedstat_set(se->statistics.sleep_start,
rq_clock(rq_of(cfs_rq)));
if (tsk->state & TASK_UNINTERRUPTIBLE)
schedstat_set(se->statistics.block_start,
rq_clock(rq_of(cfs_rq)));
}
}
/*
* We are picking a new current task - update its stats:
*/
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* We are starting a new run period:
*/
se->exec_start = rq_clock_task(rq_of(cfs_rq));
}
/**************************************************
* Scheduling class queueing methods:
*/
#ifdef CONFIG_NUMA_BALANCING
/*
* Approximate time to scan a full NUMA task in ms. The task scan period is
* calculated based on the task's virtual memory size and
* numa_balancing_scan_size.
*/
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;
/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;
/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
struct numa_group {
atomic_t refcount;
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
pid_t gid;
int active_nodes;
struct rcu_head rcu;
unsigned long total_faults;
unsigned long max_faults_cpu;
/*
* Faults_cpu is used to decide whether memory should move
* towards the CPU. As a consequence, these stats are weighted
* more by CPU use than by memory faults.
*/
unsigned long *faults_cpu;
unsigned long faults[0];
};
static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);
static unsigned int task_nr_scan_windows(struct task_struct *p)
{
unsigned long rss = 0;
unsigned long nr_scan_pages;
/*
* Calculations based on RSS as non-present and empty pages are skipped
* by the PTE scanner and NUMA hinting faults should be trapped based
* on resident pages
*/
nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
rss = get_mm_rss(p->mm);
if (!rss)
rss = nr_scan_pages;
rss = round_up(rss, nr_scan_pages);
return rss / nr_scan_pages;
}
/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560
static unsigned int task_scan_min(struct task_struct *p)
{
unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
unsigned int scan, floor;
unsigned int windows = 1;
if (scan_size < MAX_SCAN_WINDOW)
windows = MAX_SCAN_WINDOW / scan_size;
floor = 1000 / windows;
scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
return max_t(unsigned int, floor, scan);
}
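/*
* Worked example for task_nr_scan_windows() and task_scan_min() above
* (hypothetical task, 4 KiB pages): with the default scan_size of 256 MB,
* nr_scan_pages = 256 << (20 - 12) = 65536 pages, so a task with 1 GB of
* RSS (262144 pages) spans 4 scan windows. Then windows =
* MAX_SCAN_WINDOW / 256 = 10, floor = 1000 / 10 = 100 ms, and scan =
* 1000 ms / 4 = 250 ms, giving a minimum scan period of 250 ms.
*/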
static unsigned int task_scan_start(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long period = smin;
/* Scale the maximum scan period with the amount of shared memory. */
if (p->numa_group) {
struct numa_group *ng = p->numa_group;
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
period *= atomic_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
}
return max(smin, period);
}
static unsigned int task_scan_max(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long smax;
/* Watch for min being lower than max due to floor calculations */
smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
/* Scale the maximum scan period with the amount of shared memory. */
if (p->numa_group) {
struct numa_group *ng = p->numa_group;
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
period *= atomic_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
smax = max(smax, period);
}
return max(smin, smax);
}
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
rq->nr_numa_running += (p->numa_preferred_nid != -1);
rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}
static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
rq->nr_numa_running -= (p->numa_preferred_nid != -1);
rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}
/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2
/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
pid_t task_numa_group_id(struct task_struct *p)
{
return p->numa_group ? p->numa_group->gid : 0;
}
/*
* The averaged statistics, shared & private, memory & cpu,
* occupy the first half of the array. The second half of the
* array is for current counters, which are averaged into the
* first set by task_numa_placement.
*/
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}
static inline unsigned long task_faults(struct task_struct *p, int nid)
{
if (!p->numa_faults)
return 0;
return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults(struct task_struct *p, int nid)
{
if (!p->numa_group)
return 0;
return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults_priv(struct numa_group *ng)
{
unsigned long faults = 0;
int node;
for_each_online_node(node) {
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
return faults;
}
static inline unsigned long group_faults_shared(struct numa_group *ng)
{
unsigned long faults = 0;
int node;
for_each_online_node(node) {
faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
}
return faults;
}
/*
* A node triggering more than 1/3 as many NUMA faults as the maximum is
* considered part of a numa group's pseudo-interleaving set. Migrations
* between these nodes are slowed down, to allow things to settle down.
*/
#define ACTIVE_NODE_FRACTION 3
static bool numa_is_active_node(int nid, struct numa_group *ng)
{
return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}
/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
int maxdist, bool task)
{
unsigned long score = 0;
int node;
/*
* All nodes are directly connected, and the same distance
* from each other. No need for fancy placement algorithms.
*/
if (sched_numa_topology_type == NUMA_DIRECT)
return 0;
/*
* This code is called for each node, introducing N^2 complexity,
* which should be ok given the number of nodes rarely exceeds 8.
*/
for_each_online_node(node) {
unsigned long faults;
int dist = node_distance(nid, node);
/*
* The furthest away nodes in the system are not interesting
* for placement; nid was already counted.
*/
if (dist == sched_max_numa_distance || node == nid)
continue;
/*
* On systems with a backplane NUMA topology, compare groups
* of nodes, and move tasks towards the group with the most
* memory accesses. When comparing two nodes at distance
* "hoplimit", only nodes closer by than "hoplimit" are part
* of each group. Skip other nodes.
*/
if (sched_numa_topology_type == NUMA_BACKPLANE &&
dist > maxdist)
continue;
/* Add up the faults from nearby nodes. */
if (task)
faults = task_faults(p, node);
else
faults = group_faults(p, node);
/*
* On systems with a glueless mesh NUMA topology, there are
* no fixed "groups of nodes". Instead, nodes that are not
* directly connected bounce traffic through intermediate
* nodes; a numa_group can occupy any set of nodes.
* The further away a node is, the less the faults count.
* This seems to result in good task placement.
*/
if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
faults *= (sched_max_numa_distance - dist);
faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
}
score += faults;
}
return score;
}
/*
* These return the fraction of accesses done by a particular task, or
* task group, on a particular numa node. The group weight is given a
* larger multiplier, in order to group tasks together that are almost
* evenly spread out between numa nodes.
*/
static inline unsigned long task_weight(struct task_struct *p, int nid,
int dist)
{
unsigned long faults, total_faults;
if (!p->numa_faults)
return 0;
total_faults = p->total_numa_faults;
if (!total_faults)
return 0;
faults = task_faults(p, nid);
faults += score_nearby_nodes(p, nid, dist, true);
return 1000 * faults / total_faults;
}
static inline unsigned long group_weight(struct task_struct *p, int nid,
int dist)
{
unsigned long faults, total_faults;
if (!p->numa_group)
return 0;
total_faults = p->numa_group->total_faults;
if (!total_faults)
return 0;
faults = group_faults(p, nid);
faults += score_nearby_nodes(p, nid, dist, false);
return 1000 * faults / total_faults;
}
bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int src_nid, int dst_cpu)
{
struct numa_group *ng = p->numa_group;
int dst_nid = cpu_to_node(dst_cpu);
int last_cpupid, this_cpupid;
this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
/*
* Multi-stage node selection is used in conjunction with a periodic
* migration fault to build a temporal task<->page relation. By using
* a two-stage filter we remove short/unlikely relations.
*
* Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
* a task's usage of a particular page (n_p) per total usage of this
* page (n_t) (in a given time-span) to a probability.
*
* Our periodic faults will sample this probability and getting the
* same result twice in a row, given these samples are fully
* independent, is then given by P(n)^2, provided our sample period
* is sufficiently short compared to the usage pattern.
*
* This quadratic squishes small probabilities, making it less likely we
* act on an unlikely task<->page relation.
*/
last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
if (!cpupid_pid_unset(last_cpupid) &&
cpupid_to_nid(last_cpupid) != dst_nid)
return false;
/* Always allow migrate on private faults */
if (cpupid_match_pid(p, last_cpupid))
return true;
/* A shared fault, but p->numa_group has not been set up yet. */
if (!ng)
return true;
/*
* Destination node is much more heavily used than the source
* node? Allow migration.
*/
if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
ACTIVE_NODE_FRACTION)
return true;
/*
* Distribute memory according to CPU & memory use on each node,
* with 3/4 hysteresis to avoid unnecessary memory migrations:
*
* faults_cpu(dst) 3 faults_cpu(src)
* --------------- * - > ---------------
* faults_mem(dst) 4 faults_mem(src)
*/
return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
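/*
* Example of the 3/4 hysteresis above (hypothetical fault counts): with
* faults_cpu(dst) = 400, faults_mem(dst) = 200, faults_cpu(src) = 300 and
* faults_mem(src) = 300, the check is 400 * 300 * 3 = 360000 >
* 300 * 200 * 4 = 240000, so the page is migrated; the destination's
* CPU/memory fault ratio (2.0) exceeds the source's (1.0) even after the
* 3/4 damping.
*/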
static unsigned long weighted_cpuload(struct rq *rq);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
unsigned long nr_running;
unsigned long load;
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
/* Approximate capacity in terms of runnable tasks on a node */
unsigned long task_capacity;
int has_free_capacity;
};
/*
* XXX borrowed from update_sg_lb_stats
*/
static void update_numa_stats(struct numa_stats *ns, int nid)
{
int smt, cpu, cpus = 0;
unsigned long capacity;
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
ns->nr_running += rq->nr_running;
ns->load += weighted_cpuload(rq);
ns->compute_capacity += capacity_of(cpu);
cpus++;
}
/*
* If we raced with hotplug and there are no CPUs left in our mask
* the @ns structure is NULL'ed and task_numa_compare() will
* not find this node attractive.
*
* We'll either bail at !has_free_capacity, or we'll detect a huge
* imbalance and bail there.
*/
if (!cpus)
return;
/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
capacity = cpus / smt; /* cores */
ns->task_capacity = min_t(unsigned, capacity,
DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
struct task_numa_env {
struct task_struct *p;
int src_cpu, src_nid;
int dst_cpu, dst_nid;
struct numa_stats src_stats, dst_stats;
int imbalance_pct;
int dist;
struct task_struct *best_task;
long best_imp;
int best_cpu;
};
static void task_numa_assign(struct task_numa_env *env,
struct task_struct *p, long imp)
{
if (env->best_task)
put_task_struct(env->best_task);
if (p)
get_task_struct(p);
env->best_task = p;
env->best_imp = imp;
env->best_cpu = env->dst_cpu;
}
static bool load_too_imbalanced(long src_load, long dst_load,
struct task_numa_env *env)
{
long imb, old_imb;
long orig_src_load, orig_dst_load;
long src_capacity, dst_capacity;
/*
* The load is corrected for the CPU capacity available on each node.
*
* src_load dst_load
* ------------ vs ---------
* src_capacity dst_capacity
*/
src_capacity = env->src_stats.compute_capacity;
dst_capacity = env->dst_stats.compute_capacity;
/* We care about the slope of the imbalance, not the direction. */
if (dst_load < src_load)
swap(dst_load, src_load);
/* Is the difference below the threshold? */
imb = dst_load * src_capacity * 100 -
src_load * dst_capacity * env->imbalance_pct;
if (imb <= 0)
return false;
/*
* The imbalance is above the allowed threshold.
* Compare it with the old imbalance.
*/
orig_src_load = env->src_stats.load;
orig_dst_load = env->dst_stats.load;
if (orig_dst_load < orig_src_load)
swap(orig_dst_load, orig_src_load);
old_imb = orig_dst_load * src_capacity * 100 -
orig_src_load * dst_capacity * env->imbalance_pct;
/* Would this change make things worse? */
return (imb > old_imb);
}
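/*
* Numeric sketch (hypothetical loads, equal capacities of 1024,
* imbalance_pct = 112): with src_load = 1000 and dst_load = 1100,
* imb = 1100 * 1024 * 100 - 1000 * 1024 * 112 = 1024 * (110000 - 112000),
* which is negative, so the move is allowed. With dst_load = 1200 the
* difference exceeds the 12% threshold and is then compared against the
* pre-move imbalance.
*/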
/*
* This checks if the overall compute and NUMA accesses of the system would
* be improved if the source task was migrated to the target dst_cpu, taking
* into account that it might be best if the task running on the dst_cpu
* is exchanged with the source task.
*/
static void task_numa_compare(struct task_numa_env *env,
long taskimp, long groupimp)
{
struct rq *src_rq = cpu_rq(env->src_cpu);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
struct task_struct *cur;
long src_load, dst_load;
long load;
long imp = env->p->numa_group ? groupimp : taskimp;
long moveimp = imp;
int dist = env->dist;
rcu_read_lock();
cur = task_rcu_dereference(&dst_rq->curr);
if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
cur = NULL;
/*
* Because we have preemption enabled we can get migrated around and
* end up trying to select ourselves (current == env->p) as a swap candidate.
*/
if (cur == env->p)
goto unlock;
/*
* "imp" is the fault differential for the source task between the
* source and destination node. Calculate the total differential for
* the source task and potential destination task. The more negative
* the value is, the more remote accesses that would be expected to
* be incurred if the tasks were swapped.
*/
if (cur) {
/* Skip this swap candidate if cannot move to the source cpu */
if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
goto unlock;
/*
* If dst and source tasks are in the same NUMA group, or not
* in any group then look only at task weights.
*/
if (cur->numa_group == env->p->numa_group) {
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
* Add some hysteresis to prevent swapping the
* tasks within a group over tiny differences.
*/
if (cur->numa_group)
imp -= imp/16;
} else {
/*
* Compare the group weights. If a task is all by
* itself (not part of a group), use the task weight
* instead.
*/
if (cur->numa_group)
imp += group_weight(cur, env->src_nid, dist) -
group_weight(cur, env->dst_nid, dist);
else
imp += task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
}
}
if (imp <= env->best_imp && moveimp <= env->best_imp)
goto unlock;
if (!cur) {
/* Is there capacity at our destination? */
if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
!env->dst_stats.has_free_capacity)
goto unlock;
goto balance;
}
/* Balance doesn't matter much if we're running a task per cpu */
if (imp > env->best_imp && src_rq->nr_running == 1 &&
dst_rq->nr_running == 1)
goto assign;
/*
* In the overloaded case, try and keep the load balanced.
*/
balance:
load = task_h_load(env->p);
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
if (moveimp > imp && moveimp > env->best_imp) {
/*
* If the improvement from just moving env->p direction is
* better than swapping tasks around, check if a move is
* possible. Store a slightly smaller score than moveimp,
* so an actually idle CPU will win.
*/
if (!load_too_imbalanced(src_load, dst_load, env)) {
imp = moveimp - 1;
cur = NULL;
goto assign;
}
}
if (imp <= env->best_imp)
goto unlock;
if (cur) {
load = task_h_load(cur);
dst_load -= load;
src_load += load;
}
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
/*
* One idle CPU per node is evaluated for a task numa move.
* Call select_idle_sibling to maybe find a better one.
*/
if (!cur) {
/*
* select_idle_siblings() uses a per-cpu cpumask that
* can be used from IRQ context.
*/
local_irq_disable();
env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
env->dst_cpu);
local_irq_enable();
}
assign:
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
}
static void task_numa_find_cpu(struct task_numa_env *env,
long taskimp, long groupimp)
{
int cpu;
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;
env->dst_cpu = cpu;
task_numa_compare(env, taskimp, groupimp);
}
}
/* Only move tasks to a NUMA node less busy than the current node. */
static bool numa_has_capacity(struct task_numa_env *env)
{
struct numa_stats *src = &env->src_stats;
struct numa_stats *dst = &env->dst_stats;
if (src->has_free_capacity && !dst->has_free_capacity)
return false;
/*
* Only consider a task move if the source has a higher load
* than the destination, corrected for CPU capacity on each node.
*
* src->load dst->load
* --------------------- vs ---------------------
* src->compute_capacity dst->compute_capacity
*/
if (src->load * dst->compute_capacity * env->imbalance_pct >
dst->load * src->compute_capacity * 100)
return true;
return false;
}
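/*
* For instance (hypothetical numbers, imbalance_pct = 112): if both nodes
* have compute_capacity = 2048, src->load = 1000 and dst->load = 600, then
* 1000 * 2048 * 112 > 600 * 2048 * 100, so the source node is considered
* busy enough (capacity-corrected) to justify looking for a CPU on the
* destination node.
*/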
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
.p = p,
.src_cpu = task_cpu(p),
.src_nid = task_node(p),
.imbalance_pct = 112,
.best_task = NULL,
.best_imp = 0,
.best_cpu = -1,
};
struct sched_domain *sd;
unsigned long taskweight, groupweight;
int nid, ret, dist;
long taskimp, groupimp;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
* imbalance and would be the first to start moving tasks about.
*
* And we want to avoid any moving of tasks about, as that would create
* random movement of tasks -- counter the numa conditions we're trying
* to satisfy here.
*/
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
if (sd)
env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
rcu_read_unlock();
/*
* Cpusets can break the scheduler domain tree into smaller
* balance domains, some of which do not cross NUMA boundaries.
* Tasks that are "trapped" in such domains cannot be migrated
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
p->numa_preferred_nid = task_node(p);
return -EINVAL;
}
env.dst_nid = p->numa_preferred_nid;
dist = env.dist = node_distance(env.src_nid, env.dst_nid);
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
update_numa_stats(&env.src_stats, env.src_nid);
taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
update_numa_stats(&env.dst_stats, env.dst_nid);
/* Try to find a spot on the preferred nid. */
if (numa_has_capacity(&env))
task_numa_find_cpu(&env, taskimp, groupimp);
/*
* Look at other nodes in these cases:
* - there is no space available on the preferred_nid
* - the task is part of a numa_group that is interleaved across
* multiple NUMA nodes; in order to better consolidate the group,
* we need to check other locations.
*/
if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
for_each_online_node(nid) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
dist = node_distance(env.src_nid, env.dst_nid);
if (sched_numa_topology_type == NUMA_BACKPLANE &&
dist != env.dist) {
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
}
/* Only consider nodes where both task and groups benefit */
taskimp = task_weight(p, nid, dist) - taskweight;
groupimp = group_weight(p, nid, dist) - groupweight;
if (taskimp < 0 && groupimp < 0)
continue;
env.dist = dist;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
if (numa_has_capacity(&env))
task_numa_find_cpu(&env, taskimp, groupimp);
}
}
/*
* If the task is part of a workload that spans multiple NUMA nodes,
* and is migrating into one of the workload's active nodes, remember
* this node as the task's preferred numa node, so the workload can
* settle down.
* A task that migrated to a second choice node will be better off
* trying for a better one later. Do not set the preferred node here.
*/
if (p->numa_group) {
struct numa_group *ng = p->numa_group;
if (env.best_cpu == -1)
nid = env.src_nid;
else
nid = env.dst_nid;
if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
sched_setnuma(p, env.dst_nid);
}
/* No better CPU than the current one was found. */
if (env.best_cpu == -1)
return -EAGAIN;
/*
* Reset the scan period if the task is being rescheduled on an
 * alternative node to recheck if the task is now properly placed.
*/
p->numa_scan_period = task_scan_start(p);
if (env.best_task == NULL) {
ret = migrate_task_to(p, env.best_cpu);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
return ret;
}
ret = migrate_swap(p, env.best_task);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
put_task_struct(env.best_task);
return ret;
}
/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
unsigned long interval = HZ;
/* This task has no NUMA fault statistics yet */
if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
return;
/* Periodically retry migrating the task to the preferred node */
interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
p->numa_migrate_retry = jiffies + interval;
/* Success if task is already running on preferred CPU */
if (task_node(p) == p->numa_preferred_nid)
return;
/* Otherwise, try migrate to a CPU on the preferred node */
task_numa_migrate(p);
}
/*
 * Find out how many nodes the workload is actively running on. Do this by
* tracking the nodes from which NUMA hinting faults are triggered. This can
* be different from the set of nodes where the workload's memory is currently
* located.
*/
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
unsigned long faults, max_faults = 0;
int nid, active_nodes = 0;
for_each_online_node(nid) {
faults = group_faults_cpu(numa_group, nid);
if (faults > max_faults)
max_faults = faults;
}
for_each_online_node(nid) {
faults = group_faults_cpu(numa_group, nid);
if (faults * ACTIVE_NODE_FRACTION > max_faults)
active_nodes++;
}
numa_group->max_faults_cpu = max_faults;
numa_group->active_nodes = active_nodes;
}
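/*
 * Illustration of the threshold above, assuming ACTIVE_NODE_FRACTION is 3
 * (hypothetical fault counts): if group_faults_cpu() reports 900 faults on
 * node 0, 400 on node 1 and 250 on node 2, then max_faults == 900 and a
 * node is active when faults * 3 > 900, i.e. faults > 300. Nodes 0 and 1
 * qualify, node 2 does not, so active_nodes ends up as 2.
 */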
/*
* When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
* increments. The more local the fault statistics are, the higher the scan
* period will be for the next scan window. If local/(local+remote) ratio is
* below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
* the scan period will decrease. Aim for 70% local accesses.
*/
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7
/*
* Increase the scan period (slow down scanning) if the majority of
* our memory is already on our local node, or if the majority of
* the page accesses are shared with other processes.
* Otherwise, decrease the scan period.
*/
static void update_task_scan_period(struct task_struct *p,
unsigned long shared, unsigned long private)
{
unsigned int period_slot;
int lr_ratio, ps_ratio;
int diff;
unsigned long remote = p->numa_faults_locality[0];
unsigned long local = p->numa_faults_locality[1];
/*
 * If there were no recorded hinting faults then either the task is
 * completely idle or all activity is in areas that are not of interest
 * to automatic numa balancing. Related to that, if there were failed
 * migrations then it implies we are migrating too quickly or the local
 * node is overloaded. In either case, scan slower.
*/
if (local + shared == 0 || p->numa_faults_locality[2]) {
p->numa_scan_period = min(p->numa_scan_period_max,
p->numa_scan_period << 1);
p->mm->numa_next_scan = jiffies +
msecs_to_jiffies(p->numa_scan_period);
return;
}
/*
* Prepare to scale scan period relative to the current period.
* == NUMA_PERIOD_THRESHOLD scan period stays the same
* < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
* >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
*/
period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
/*
* Most memory accesses are local. There is no need to
* do fast NUMA scanning, since memory is already local.
*/
int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
if (!slot)
slot = 1;
diff = slot * period_slot;
} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
/*
* Most memory accesses are shared with other tasks.
* There is no point in continuing fast NUMA scanning,
* since other tasks may just move the memory elsewhere.
*/
int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
if (!slot)
slot = 1;
diff = slot * period_slot;
} else {
/*
* Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
* yet they are not on the local NUMA node. Speed up
* NUMA scanning to get the memory moved over.
*/
int ratio = max(lr_ratio, ps_ratio);
diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
}
p->numa_scan_period = clamp(p->numa_scan_period + diff,
task_scan_min(p), task_scan_max(p));
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
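/*
 * Worked example of the adjustment above (hypothetical numbers): with a
 * current numa_scan_period of 1000ms, period_slot is 100ms. If 70% of the
 * faults were local and only 20% private (lr_ratio == 7, ps_ratio == 2),
 * the lr_ratio branch applies with slot == 1, so the period grows by
 * 100ms (scan slower). If instead lr_ratio == 4 and ps_ratio == 3, the
 * final branch yields diff == -(7 - 4) * 100ms == -300ms (scan faster),
 * subject to the task_scan_min()/task_scan_max() clamp.
 */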
/*
* Get the fraction of time the task has been running since the last
* NUMA placement cycle. The scheduler keeps similar statistics, but
* decays those on a 32ms period, which is orders of magnitude off
* from the dozens-of-seconds NUMA balancing period. Use the scheduler
* stats only if the task is so new there are no NUMA statistics yet.
*/
static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
{
u64 runtime, delta, now;
/* Use the start of this time slice to avoid calculations. */
now = p->se.exec_start;
runtime = p->se.sum_exec_runtime;
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
/* Avoid time going backwards, prevent potential divide error: */
if (unlikely((s64)*period < 0))
*period = 0;
} else {
delta = p->se.avg.load_sum;
*period = LOAD_AVG_MAX;
}
p->last_sum_exec_runtime = runtime;
p->last_task_numa_placement = now;
return delta;
}
/*
* Determine the preferred nid for a task in a numa_group. This needs to
* be done in a way that produces consistent results with group_weight,
* otherwise workloads might not converge.
*/
static int preferred_group_nid(struct task_struct *p, int nid)
{
nodemask_t nodes;
int dist;
/* Direct connections between all NUMA nodes. */
if (sched_numa_topology_type == NUMA_DIRECT)
return nid;
/*
* On a system with glueless mesh NUMA topology, group_weight
* scores nodes according to the number of NUMA hinting faults on
* both the node itself, and on nearby nodes.
*/
if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
unsigned long score, max_score = 0;
int node, max_node = nid;
dist = sched_max_numa_distance;
for_each_online_node(node) {
score = group_weight(p, node, dist);
if (score > max_score) {
max_score = score;
max_node = node;
}
}
return max_node;
}
/*
* Finding the preferred nid in a system with NUMA backplane
* interconnect topology is more involved. The goal is to locate
* tasks from numa_groups near each other in the system, and
* untangle workloads from different sides of the system. This requires
* searching down the hierarchy of node groups, recursively searching
* inside the highest scoring group of nodes. The nodemask tricks
* keep the complexity of the search down.
*/
nodes = node_online_map;
for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
unsigned long max_faults = 0;
nodemask_t max_group = NODE_MASK_NONE;
int a, b;
/* Are there nodes at this distance from each other? */
if (!find_numa_distance(dist))
continue;
for_each_node_mask(a, nodes) {
unsigned long faults = 0;
nodemask_t this_group;
nodes_clear(this_group);
/* Sum group's NUMA faults; includes a==b case. */
for_each_node_mask(b, nodes) {
if (node_distance(a, b) < dist) {
faults += group_faults(p, b);
node_set(b, this_group);
node_clear(b, nodes);
}
}
/* Remember the top group. */
if (faults > max_faults) {
max_faults = faults;
max_group = this_group;
/*
* subtle: at the smallest distance there is
* just one node left in each "group", the
* winner is the preferred nid.
*/
nid = a;
}
}
/* Next round, evaluate the nodes within max_group. */
if (!max_faults)
break;
nodes = max_group;
}
return nid;
}
static void task_numa_placement(struct task_struct *p)
{
int seq, nid, max_nid = -1, max_group_nid = -1;
unsigned long max_faults = 0, max_group_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
u64 runtime, period;
spinlock_t *group_lock = NULL;
/*
* The p->mm->numa_scan_seq field gets updated without
* exclusive access. Use READ_ONCE() here to ensure
* that the field is read in a single access:
*/
seq = READ_ONCE(p->mm->numa_scan_seq);
if (p->numa_scan_seq == seq)
return;
p->numa_scan_seq = seq;
p->numa_scan_period_max = task_scan_max(p);
total_faults = p->numa_faults_locality[0] +
p->numa_faults_locality[1];
runtime = numa_get_avg_runtime(p, &period);
/* If the task is part of a group prevent parallel updates to group stats */
if (p->numa_group) {
group_lock = &p->numa_group->lock;
spin_lock_irq(group_lock);
}
/* Find the node with the highest number of faults */
for_each_online_node(nid) {
/* Keep track of the offsets in numa_faults array */
int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
unsigned long faults = 0, group_faults = 0;
int priv;
for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
long diff, f_diff, f_weight;
mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
/* Decay existing window, copy faults since last scan */
diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
fault_types[priv] += p->numa_faults[membuf_idx];
p->numa_faults[membuf_idx] = 0;
/*
* Normalize the faults_from, so all tasks in a group
* count according to CPU use, instead of by the raw
* number of faults. Tasks with little runtime have
* little over-all impact on throughput, and thus their
* faults are less important.
*/
f_weight = div64_u64(runtime << 16, period + 1);
f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
(total_faults + 1);
f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
p->numa_faults[cpubuf_idx] = 0;
p->numa_faults[mem_idx] += diff;
p->numa_faults[cpu_idx] += f_diff;
faults += p->numa_faults[mem_idx];
p->total_numa_faults += diff;
if (p->numa_group) {
/*
* safe because we can only change our own group
*
* mem_idx represents the offset for a given
* nid and priv in a specific region because it
* is at the beginning of the numa_faults array.
*/
p->numa_group->faults[mem_idx] += diff;
p->numa_group->faults_cpu[mem_idx] += f_diff;
p->numa_group->total_faults += diff;
group_faults += p->numa_group->faults[mem_idx];
}
}
if (faults > max_faults) {
max_faults = faults;
max_nid = nid;
}
if (group_faults > max_group_faults) {
max_group_faults = group_faults;
max_group_nid = nid;
}
}
update_task_scan_period(p, fault_types[0], fault_types[1]);
if (p->numa_group) {
numa_group_count_active_nodes(p->numa_group);
spin_unlock_irq(group_lock);
max_nid = preferred_group_nid(p, max_group_nid);
}
if (max_faults) {
/* Set the new preferred node */
if (max_nid != p->numa_preferred_nid)
sched_setnuma(p, max_nid);
if (task_node(p) != p->numa_preferred_nid)
numa_migrate_preferred(p);
}
}
static inline int get_numa_group(struct numa_group *grp)
{
return atomic_inc_not_zero(&grp->refcount);
}
static inline void put_numa_group(struct numa_group *grp)
{
if (atomic_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
}
static void task_numa_group(struct task_struct *p, int cpupid, int flags,
int *priv)
{
struct numa_group *grp, *my_grp;
struct task_struct *tsk;
bool join = false;
int cpu = cpupid_to_cpu(cpupid);
int i;
if (unlikely(!p->numa_group)) {
unsigned int size = sizeof(struct numa_group) +
4*nr_node_ids*sizeof(unsigned long);
grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!grp)
return;
atomic_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
grp->gid = p->pid;
/* Second half of the array tracks nids where faults happen */
grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
nr_node_ids;
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
grp->faults[i] = p->numa_faults[i];
grp->total_faults = p->total_numa_faults;
grp->nr_tasks++;
rcu_assign_pointer(p->numa_group, grp);
}
rcu_read_lock();
tsk = READ_ONCE(cpu_rq(cpu)->curr);
if (!cpupid_match_pid(tsk, cpupid))
goto no_join;
grp = rcu_dereference(tsk->numa_group);
if (!grp)
goto no_join;
my_grp = p->numa_group;
if (grp == my_grp)
goto no_join;
/*
 * Only join the other group if it's bigger; if we're the bigger group,
* the other task will join us.
*/
if (my_grp->nr_tasks > grp->nr_tasks)
goto no_join;
/*
* Tie-break on the grp address.
*/
if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
goto no_join;
/* Always join threads in the same process. */
if (tsk->mm == current->mm)
join = true;
/* Simple filter to avoid false positives due to PID collisions */
if (flags & TNF_SHARED)
join = true;
/* Update priv based on whether false sharing was detected */
*priv = !join;
if (join && !get_numa_group(grp))
goto no_join;
rcu_read_unlock();
if (!join)
return;
BUG_ON(irqs_disabled());
double_lock_irq(&my_grp->lock, &grp->lock);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
my_grp->faults[i] -= p->numa_faults[i];
grp->faults[i] += p->numa_faults[i];
}
my_grp->total_faults -= p->total_numa_faults;
grp->total_faults += p->total_numa_faults;
my_grp->nr_tasks--;
grp->nr_tasks++;
spin_unlock(&my_grp->lock);
spin_unlock_irq(&grp->lock);
rcu_assign_pointer(p->numa_group, grp);
put_numa_group(my_grp);
return;
no_join:
rcu_read_unlock();
return;
}
/*
 * Get rid of NUMA statistics associated with a task (either current or dead).
* If @final is set, the task is dead and has reached refcount zero, so we can
* safely free all relevant data structures. Otherwise, there might be
* concurrent reads from places like load balancing and procfs, and we should
* reset the data back to default state without freeing ->numa_faults.
*/
void task_numa_free(struct task_struct *p, bool final)
{
struct numa_group *grp = p->numa_group;
unsigned long *numa_faults = p->numa_faults;
unsigned long flags;
int i;
if (!numa_faults)
return;
if (grp) {
spin_lock_irqsave(&grp->lock, flags);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
grp->faults[i] -= p->numa_faults[i];
grp->total_faults -= p->total_numa_faults;
grp->nr_tasks--;
spin_unlock_irqrestore(&grp->lock, flags);
RCU_INIT_POINTER(p->numa_group, NULL);
put_numa_group(grp);
}
if (final) {
p->numa_faults = NULL;
kfree(numa_faults);
} else {
p->total_numa_faults = 0;
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
numa_faults[i] = 0;
}
}
/*
* Got a PROT_NONE fault for a page on @node.
*/
void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
{
struct task_struct *p = current;
bool migrated = flags & TNF_MIGRATED;
int cpu_node = task_node(current);
int local = !!(flags & TNF_FAULT_LOCAL);
struct numa_group *ng;
int priv;
if (!IS_ENABLED(CONFIG_NUMA_BALANCING) ||
!static_branch_likely(&sched_numa_balancing))
return;
/* for example, ksmd faulting in a user's mm */
if (!p->mm)
return;
/* Allocate buffer to track faults on a per-node basis */
if (unlikely(!p->numa_faults)) {
int size = sizeof(*p->numa_faults) *
NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
if (!p->numa_faults)
return;
p->total_numa_faults = 0;
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
/*
* First accesses are treated as private, otherwise consider accesses
* to be private if the accessing pid has not changed
*/
if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
priv = 1;
} else {
priv = cpupid_match_pid(p, last_cpupid);
if (!priv && !(flags & TNF_NO_GROUP))
task_numa_group(p, last_cpupid, flags, &priv);
}
/*
* If a workload spans multiple NUMA nodes, a shared fault that
* occurs wholly within the set of nodes that the workload is
* actively using should be counted as local. This allows the
* scan rate to slow down when a workload has settled down.
*/
ng = p->numa_group;
if (!priv && !local && ng && ng->active_nodes > 1 &&
numa_is_active_node(cpu_node, ng) &&
numa_is_active_node(mem_node, ng))
local = 1;
task_numa_placement(p);
/*
 * Retry migrating the task to its preferred node periodically, in case
 * it previously failed, or the scheduler moved us.
*/
if (time_after(jiffies, p->numa_migrate_retry))
numa_migrate_preferred(p);
if (migrated)
p->numa_pages_migrated += pages;
if (flags & TNF_MIGRATE_FAIL)
p->numa_faults_locality[2] += pages;
p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
p->numa_faults_locality[local] += pages;
}
static void reset_ptenuma_scan(struct task_struct *p)
{
/*
* We only did a read acquisition of the mmap sem, so
* p->mm->numa_scan_seq is written to without exclusive access
* and the update is not guaranteed to be atomic. That's not
* much of an issue though, since this is just used for
* statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
* expensive, to avoid any form of compiler optimizations:
*/
WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
p->mm->numa_scan_offset = 0;
}
/*
* The expensive part of numa migration is done from task_work context.
* Triggered from task_tick_numa().
*/
void task_numa_work(struct callback_head *work)
{
unsigned long migrate, next_scan, now = jiffies;
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
u64 runtime = p->se.sum_exec_runtime;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long nr_pte_updates = 0;
long pages, virtpages;
SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
work->next = work; /* protect against double add */
/*
* Who cares about NUMA placement when they're dying.
*
* NOTE: make sure not to dereference p->mm before this check,
* exit_task_work() happens _after_ exit_mm() so we could be called
* without p->mm even though we still had it when we enqueued this
* work.
*/
if (p->flags & PF_EXITING)
return;
if (!mm->numa_next_scan) {
mm->numa_next_scan = now +
msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
}
/*
* Enforce maximal scan/migration frequency..
*/
migrate = mm->numa_next_scan;
if (time_before(now, migrate))
return;
if (p->numa_scan_period == 0) {
p->numa_scan_period_max = task_scan_max(p);
p->numa_scan_period = task_scan_start(p);
}
next_scan = now + msecs_to_jiffies(p->numa_scan_period);
if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
return;
/*
* Delay this task enough that another task of this mm will likely win
* the next time around.
*/
p->node_stamp += 2 * TICK_NSEC;
start = mm->numa_scan_offset;
pages = sysctl_numa_balancing_scan_size;
pages <<= 20 - PAGE_SHIFT; /* MB in pages */
virtpages = pages * 8; /* Scan up to this much virtual space */
if (!pages)
return;
if (!down_read_trylock(&mm->mmap_sem))
return;
vma = find_vma(mm, start);
if (!vma) {
reset_ptenuma_scan(p);
start = 0;
vma = mm->mmap;
}
for (; vma; vma = vma->vm_next) {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
continue;
}
/*
* Shared library pages mapped by multiple processes are not
* migrated as it is expected they are cache replicated. Avoid
* hinting faults in read-only file-backed mappings or the vdso
* as migrating the pages will be of marginal benefit.
*/
if (!vma->vm_mm ||
(vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
continue;
/*
* Skip inaccessible VMAs to avoid any confusion between
* PROT_NONE and NUMA hinting ptes
*/
if (!vma_is_accessible(vma))
continue;
do {
start = max(start, vma->vm_start);
end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
end = min(end, vma->vm_end);
nr_pte_updates = change_prot_numa(vma, start, end);
/*
 * Try to scan sysctl_numa_balancing_scan_size worth of
* hpages that have at least one present PTE that
* is not already pte-numa. If the VMA contains
* areas that are unused or already full of prot_numa
* PTEs, scan up to virtpages, to skip through those
* areas faster.
*/
if (nr_pte_updates)
pages -= (end - start) >> PAGE_SHIFT;
virtpages -= (end - start) >> PAGE_SHIFT;
start = end;
if (pages <= 0 || virtpages <= 0)
goto out;
cond_resched();
} while (end != vma->vm_end);
}
out:
/*
* It is possible to reach the end of the VMA list but the last few
 * VMAs are not guaranteed to be vma_migratable. If they are not, we
* would find the !migratable VMA on the next scan but not reset the
* scanner to the start so check it now.
*/
if (vma)
mm->numa_scan_offset = start;
else
reset_ptenuma_scan(p);
up_read(&mm->mmap_sem);
/*
* Make sure tasks use at least 32x as much time to run other code
* than they used here, to limit NUMA PTE scanning overhead to 3% max.
* Usually update_task_scan_period slows down scanning enough; on an
* overloaded system we need to limit overhead on a per task basis.
*/
if (unlikely(p->se.sum_exec_runtime != runtime)) {
u64 diff = p->se.sum_exec_runtime - runtime;
p->node_stamp += 32 * diff;
}
}
/*
* Drive the periodic memory faults..
*/
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->numa_work;
u64 period, now;
/*
* We don't care about NUMA placement if we don't have memory.
*/
if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
return;
/*
* Using runtime rather than walltime has the dual advantage that
* we (mostly) drive the selection from busy threads and that the
* task needs to have done some actual work before we bother with
* NUMA placement.
*/
now = curr->se.sum_exec_runtime;
period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
if (now > curr->node_stamp + period) {
if (!curr->node_stamp)
curr->numa_scan_period = task_scan_start(curr);
curr->node_stamp += period;
if (!time_before(jiffies, curr->mm->numa_next_scan)) {
init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
task_work_add(curr, work, true);
}
}
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
}
static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
struct rq *rq = rq_of(cfs_rq);
account_numa_enqueue(rq, task_of(se));
list_add(&se->group_node, &rq->cfs_tasks);
}
#endif
cfs_rq->nr_running++;
}
static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
if (entity_is_task(se)) {
account_numa_dequeue(rq_of(cfs_rq), task_of(se));
list_del_init(&se->group_node);
}
#endif
cfs_rq->nr_running--;
}
/*
* Signed add and clamp on underflow.
*
* Explicitly do a load-store to ensure the intermediate value never hits
* memory. This allows lockless observations without ever seeing the negative
* values.
*/
#define add_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
typeof(_val) val = (_val); \
typeof(*ptr) res, var = READ_ONCE(*ptr); \
\
res = var + val; \
\
if (val < 0 && res > var) \
res = 0; \
\
WRITE_ONCE(*ptr, res); \
} while (0)
/*
* Unsigned subtract and clamp on underflow.
*
* Explicitly do a load-store to ensure the intermediate value never hits
* memory. This allows lockless observations without ever seeing the negative
* values.
*/
#define sub_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
typeof(*ptr) val = (_val); \
typeof(*ptr) res, var = READ_ONCE(*ptr); \
res = var - val; \
if (res > var) \
res = 0; \
WRITE_ONCE(*ptr, res); \
} while (0)
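/*
 * Example of the clamp above (hypothetical values): with *ptr == 5 and
 * _val == 8, the plain subtraction would wrap around to a huge unsigned
 * number; the "res > var" test catches the wrap and stores 0 instead, so
 * lockless readers never observe an underflowed value.
 */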
#ifdef CONFIG_SMP
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
cfs_rq->runnable_weight += se->runnable_weight;
cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
}
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
cfs_rq->runnable_weight -= se->runnable_weight;
sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
sub_positive(&cfs_rq->avg.runnable_load_sum,
se_runnable(se) * se->avg.runnable_load_sum);
}
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
cfs_rq->avg.load_avg += se->avg.load_avg;
cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
}
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
}
#else
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
unsigned long weight)
{
unsigned long old_weight = se->load.weight;
s64 vlag, vslice;
/*
* VRUNTIME
* ========
*
* COROLLARY #1: The virtual runtime of the entity needs to be
* adjusted if re-weight at !0-lag point.
*
* Proof: For contradiction assume this is not true, so we can
* re-weight without changing vruntime at !0-lag point.
*
* Weight VRuntime Avg-VRuntime
* before w v V
* after w' v' V'
*
* Since lag needs to be preserved through re-weight:
*
* lag = (V - v)*w = (V'- v')*w', where v = v'
* ==> V' = (V - v)*w/w' + v (1)
*
* Let W be the total weight of the entities before reweight,
* since V' is the new weighted average of entities:
*
* V' = (WV + w'v - wv) / (W + w' - w) (2)
*
* by using (1) & (2) we obtain:
*
* (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
* ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
* ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
* ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
*
* Since we are doing at !0-lag point which means V != v, we
* can simplify (3):
*
* ==> W / (W + w' - w) = w / w'
* ==> Ww' = Ww + ww' - ww
* ==> W * (w' - w) = w * (w' - w)
* ==> W = w (re-weight indicates w' != w)
*
* So the cfs_rq contains only one entity, hence vruntime of
 * the entity @v should always equal the cfs_rq's weighted
 * average vruntime @V, which means we will always re-weight
 * at the 0-lag point, thus contradicting the assumption. Proof completed.
*
*
* COROLLARY #2: Re-weight does NOT affect weighted average
* vruntime of all the entities.
*
* Proof: According to corollary #1, Eq. (1) should be:
*
* (V - v)*w = (V' - v')*w'
* ==> v' = V' - (V - v)*w/w' (4)
*
* According to the weighted average formula, we have:
*
* V' = (WV - wv + w'v') / (W - w + w')
* = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
* = (WV - wv + w'V' - Vw + wv) / (W - w + w')
* = (WV + w'V' - Vw) / (W - w + w')
*
* ==> V'*(W - w + w') = WV + w'V' - Vw
* ==> V' * (W - w) = (W - w) * V (5)
*
* If the entity is the only one in the cfs_rq, then reweight
* always occurs at 0-lag point, so V won't change. Or else
* there are other entities, hence W != w, then Eq. (5) turns
* into V' = V. So V won't change in either case, proof done.
*
*
* So according to corollary #1 & #2, the effect of re-weight
* on vruntime should be:
*
* v' = V' - (V - v) * w / w' (4)
* = V - (V - v) * w / w'
* = V - vl * w / w'
* = V - vl'
*/
if (avruntime != se->vruntime) {
vlag = entity_lag(avruntime, se);
vlag = div_s64(vlag * old_weight, weight);
se->vruntime = avruntime - vlag;
}
/*
* DEADLINE
* ========
*
* When the weight changes, the virtual time slope changes and
* we should adjust the relative virtual deadline accordingly.
*
* d' = v' + (d - v)*w/w'
* = V' - (V - v)*w/w' + (d - v)*w/w'
* = V - (V - v)*w/w' + (d - v)*w/w'
* = V + (d - V)*w/w'
*/
vslice = (s64)(se->deadline - avruntime);
vslice = div_s64(vslice * old_weight, weight);
se->deadline = avruntime + vslice;
}
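/*
 * Numeric illustration of the two adjustments above (hypothetical values,
 * assuming entity_lag() returns the unclamped V - v here): with
 * avruntime V == 1000, se->vruntime v == 940, se->deadline d == 1400,
 * old_weight == 1024 and weight == 2048:
 *
 *   vlag   = (1000 - 940) * 1024 / 2048 = 30   =>  v' = 1000 - 30  = 970
 *   vslice = (1400 - 1000) * 1024 / 2048 = 200 =>  d' = 1000 + 200 = 1200
 *
 * Doubling the weight halves the entity's virtual distances from V, since
 * its virtual time now advances at half the rate for the same real time.
 */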
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight, unsigned long runnable)
{
bool curr = cfs_rq->curr == se;
u64 avruntime;
if (se->on_rq) {
/* commit outstanding execution time */
update_curr(cfs_rq);
avruntime = avg_vruntime(cfs_rq);
if (!curr)
__dequeue_entity(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
dequeue_runnable_load_avg(cfs_rq, se);
}
dequeue_load_avg(cfs_rq, se);
se->runnable_weight = runnable;
if (se->on_rq) {
reweight_eevdf(se, avruntime, weight);
} else {
/*
* Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
* we need to scale se->vlag when w_i changes.
*/
se->vlag = div_s64(se->vlag * se->load.weight, weight);
}
update_load_set(&se->load, weight);
#ifdef CONFIG_SMP
do {
u32 divider = get_pelt_divider(&se->avg);
se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
se->avg.runnable_load_avg =
div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
} while (0);
#endif
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
enqueue_runnable_load_avg(cfs_rq, se);
if (!curr)
__enqueue_entity(cfs_rq, se);
/*
* The entity's vruntime has been adjusted, so let's check
* whether the rq-wide min_vruntime needs updated too. Since
* the calculations above require stable min_vruntime rather
* than up-to-date one, we do the update at the end of the
* reweight process.
*/
update_min_vruntime(cfs_rq);
}
}
void reweight_task(struct task_struct *p, int prio)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct load_weight *load = &se->load;
unsigned long weight = scale_load(sched_prio_to_weight[prio]);
reweight_entity(cfs_rq, se, weight, weight);
load->inv_weight = sched_prio_to_wmult[prio];
}
#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
/*
* All this does is approximate the hierarchical proportion which includes that
* global sum we all love to hate.
*
* That is, the weight of a group entity, is the proportional share of the
* group weight based on the group runqueue weights. That is:
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (1)
* \Sum grq->load.weight
*
 * Now, because that sum is prohibitively expensive to compute (been
* there, done that) we approximate it with this average stuff. The average
* moves slower and therefore the approximation is cheaper and more stable.
*
* So instead of the above, we substitute:
*
* grq->load.weight -> grq->avg.load_avg (2)
*
* which yields the following:
*
* tg->weight * grq->avg.load_avg
* ge->load.weight = ------------------------------ (3)
* tg->load_avg
*
* Where: tg->load_avg ~= \Sum grq->avg.load_avg
*
* That is shares_avg, and it is right (given the approximation (2)).
*
* The problem with it is that because the average is slow -- it was designed
* to be exactly that of course -- this leads to transients in boundary
* conditions. In specific, the case where the group was idle and we start the
* one task. It takes time for our CPU's grq->avg.load_avg to build up,
* yielding bad latency etc..
*
* Now, in that special case (1) reduces to:
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- = tg->weight (4)
 * grq->load.weight
*
* That is, the sum collapses because all other CPUs are idle; the UP scenario.
*
* So what we do is modify our approximation (3) to approach (4) in the (near)
* UP case, like:
*
* ge->load.weight =
*
* tg->weight * grq->load.weight
* --------------------------------------------------- (5)
* tg->load_avg - grq->avg.load_avg + grq->load.weight
*
* But because grq->load.weight can drop to 0, resulting in a divide by zero,
* we need to use grq->avg.load_avg as its lower bound, which then gives:
*
*
* tg->weight * grq->load.weight
* ge->load.weight = ----------------------------- (6)
* tg_load_avg'
*
* Where:
*
* tg_load_avg' = tg->load_avg - grq->avg.load_avg +
* max(grq->load.weight, grq->avg.load_avg)
*
* And that is shares_weight and is icky. In the (near) UP case it approaches
* (4) while in the normal case it approaches (3). It consistently
* overestimates the ge->load.weight and therefore:
*
* \Sum ge->load.weight >= tg->weight
*
* hence icky!
*/
static long calc_group_shares(struct cfs_rq *cfs_rq)
{
long tg_weight, tg_shares, load, shares;
struct task_group *tg = cfs_rq->tg;
tg_shares = READ_ONCE(tg->shares);
load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
tg_weight = atomic_long_read(&tg->load_avg);
/* Ensure tg_weight >= load */
tg_weight -= cfs_rq->tg_load_avg_contrib;
tg_weight += load;
shares = (tg_shares * load);
if (tg_weight)
shares /= tg_weight;
/*
* MIN_SHARES has to be unscaled here to support per-CPU partitioning
* of a group with small tg->shares value. It is a floor value which is
* assigned as a minimum load.weight to the sched_entity representing
* the group on a CPU.
*
* E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
* on an 8-core system with 8 tasks each runnable on one CPU shares has
* to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
* case no task is runnable on a CPU MIN_SHARES=2 should be returned
* instead of 0.
*/
return clamp_t(long, shares, MIN_SHARES, tg_shares);
}
/*
* This calculates the effective runnable weight for a group entity based on
* the group entity weight calculated above.
*
* Because of the above approximation (2), our group entity weight is
 * a load_avg based ratio (3). This means that it includes blocked load and
* does not represent the runnable weight.
*
* Approximate the group entity's runnable weight per ratio from the group
* runqueue:
*
* grq->avg.runnable_load_avg
* ge->runnable_weight = ge->load.weight * -------------------------- (7)
* grq->avg.load_avg
*
* However, analogous to above, since the avg numbers are slow, this leads to
* transients in the from-idle case. Instead we use:
*
* ge->runnable_weight = ge->load.weight *
*
* max(grq->avg.runnable_load_avg, grq->runnable_weight)
* ----------------------------------------------------- (8)
* max(grq->avg.load_avg, grq->load.weight)
*
* Where these max() serve both to use the 'instant' values to fix the slow
* from-idle and avoid the /0 on to-idle, similar to (6).
*/
static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
{
long runnable, load_avg;
load_avg = max(cfs_rq->avg.load_avg,
scale_load_down(cfs_rq->load.weight));
runnable = max(cfs_rq->avg.runnable_load_avg,
scale_load_down(cfs_rq->runnable_weight));
runnable *= shares;
if (load_avg)
runnable /= load_avg;
return clamp_t(long, runnable, MIN_SHARES, shares);
}
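/*
 * Example of rule (8) above (hypothetical, already scale_load_down()'d
 * numbers): with shares == 2048, grq->avg.runnable_load_avg == 512,
 * grq->runnable_weight == 300, grq->avg.load_avg == 1024 and
 * grq->load.weight == 800, the max() terms pick 512 and 1024, giving
 * runnable == 2048 * 512 / 1024 == 1024, which is then clamped to
 * [MIN_SHARES, shares].
 */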
# endif /* CONFIG_SMP */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
/*
* Recomputes the group entity based on the current state of its group
* runqueue.
*/
static void update_cfs_group(struct sched_entity *se)
{
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
long shares, runnable;
if (!gcfs_rq)
return;
if (throttled_hierarchy(gcfs_rq))
return;
#ifndef CONFIG_SMP
runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
#else
shares = calc_group_shares(gcfs_rq);
runnable = calc_group_runnable(gcfs_rq, shares);
#endif
if (unlikely(se->load.weight != shares))
reweight_entity(cfs_rq_of(se), se, shares, runnable);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_group(struct sched_entity *se)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
if (&rq->cfs == cfs_rq) {
/*
* There are a few boundary cases this might miss but it should
* get called often enough that that should (hopefully) not be
* a real problem -- added to that it only calls on the local
* CPU, so if we enqueue remotely we'll miss an update, but
* the next tick/schedule should update.
*
* It will not get called when we go idle, because the idle
* thread is a different class (!fair), nor will the utilization
* number include things like RT tasks.
*
* As is, the util number is not freq-invariant (we'd have to
* implement arch_scale_freq_capacity() for that).
*
* See cpu_util().
*/
cpufreq_update_util(rq, 0);
}
}
#ifdef CONFIG_SMP
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
{
return u64_u32_load_copy(cfs_rq->avg.last_update_time,
cfs_rq->last_update_time_copy);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
* @force: update regardless of how small the difference
*
* This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
* However, because tg->load_avg is a global value there are performance
* considerations.
*
* In order to avoid having to look at the other cfs_rq's, we use a
* differential update where we store the last value we propagated. This in
* turn allows skipping updates if the differential is 'small'.
*
* Updating tg's load_avg is necessary before update_cfs_share().
*/
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
long delta;
u64 now;
/*
* No need to update load_avg for root_task_group as it is not used.
*/
if (cfs_rq->tg == &root_task_group)
return;
/*
* For migration heavy workloads, access to tg->load_avg can be
* unbound. Limit the update rate to at most once per ms.
*/
now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
if (now - cfs_rq->last_update_tg_load_avg < NSEC_PER_MSEC)
return;
delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
cfs_rq->last_update_tg_load_avg = now;
}
trace_sched_load_tg(cfs_rq);
}
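/*
 * Example of the differential update above (hypothetical numbers): with
 * tg_load_avg_contrib == 6400 the threshold is 6400 / 64 == 100, so a new
 * cfs_rq->avg.load_avg of 6450 (delta == 50) is skipped and the global
 * tg->load_avg atomic is left untouched, while 6550 (delta == 150) is
 * propagated and becomes the new contrib. Either way, at most one update
 * per millisecond is allowed by the rate limit above.
 */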
/*
* Called within set_task_rq() right before setting a task's cpu. The
* caller only guarantees p->pi_lock is held; no other assumptions,
* including the state of rq->lock, should be made.
*/
void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next)
{
u64 p_last_update_time;
u64 n_last_update_time;
if (!sched_feat(ATTACH_AGE_LOAD))
return;
/*
 * We are supposed to update the task to "current" time, so that it is up to
 * date and ready to go to the new CPU/cfs_rq. But we have difficulty
 * getting what the current time is, so simply throw away the out-of-date
 * time. This results in the wakee task being less decayed, but giving
 * the wakee more load does not sound bad.
*/
if (!(se->avg.last_update_time && prev))
return;
p_last_update_time = cfs_rq_last_update_time(prev);
n_last_update_time = cfs_rq_last_update_time(next);
__update_load_avg_blocked_se(p_last_update_time, se);
se->avg.last_update_time = n_last_update_time;
}
/*
* When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
* propagate its contribution. The key to this propagation is the invariant
* that for each group:
*
* ge->avg == grq->avg (1)
*
* _IFF_ we look at the pure running and runnable sums. Because they
* represent the very same entity, just at different points in the hierarchy.
*
* Per the above update_tg_cfs_util() is trivial and simply copies the running
* sum over (but still wrong, because the group entity and group rq do not have
* their PELT windows aligned).
*
* However, update_tg_cfs_runnable() is more complex. So we have:
*
* ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
*
* And since, like util, the runnable part should be directly transferable,
 * the following would _appear_ to be the straightforward approach:
*
* grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
*
* And per (1) we have:
*
* ge->avg.runnable_avg == grq->avg.runnable_avg
*
* Which gives:
*
* ge->load.weight * grq->avg.load_avg
* ge->avg.load_avg = ----------------------------------- (4)
* grq->load.weight
*
* Except that is wrong!
*
* Because while for entities historical weight is not important and we
* really only care about our future and therefore can consider a pure
* runnable sum, runqueues can NOT do this.
*
* We specifically want runqueues to have a load_avg that includes
* historical weights. Those represent the blocked load, the load we expect
* to (shortly) return to us. This only works by keeping the weights as
* integral part of the sum. We therefore cannot decompose as per (3).
*
* Another reason this doesn't work is that runnable isn't a 0-sum entity.
* Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
* rq itself is runnable anywhere between 2/3 and 1 depending on how the
* runnable section of these tasks overlap (or not). If they were to perfectly
* align the rq as a whole would be runnable 2/3 of the time. If however we
* always have at least 1 runnable task, the rq as a whole is always runnable.
*
* So we'll have to approximate.. :/
*
* Given the constraint:
*
* ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
*
* We can construct a rule that adds runnable to a rq by assuming minimal
* overlap.
*
* On removal, we'll assume each task is equally runnable; which yields:
*
* grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
*
* XXX: only do this for the part of runnable > running ?
*
*/
static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
/* Nothing to update */
if (!delta)
return;
/*
 * The relation between sum and avg is:
 *
 *   sum = avg * (LOAD_AVG_MAX - 1024 + sa->period_contrib)
 *
 * however, the PELT windows are not aligned between grq and gse, so
 * LOAD_AVG_MAX is used as an approximation below.
*/
/* Set new sched_entity's utilization */
se->avg.util_avg = gcfs_rq->avg.util_avg;
se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
/* Update parent cfs_rq utilization */
add_positive(&cfs_rq->avg.util_avg, delta);
cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
}
static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long runnable_load_avg, load_avg;
u64 runnable_load_sum, load_sum = 0;
s64 delta_sum;
if (!runnable_sum)
return;
gcfs_rq->prop_runnable_sum = 0;
if (runnable_sum >= 0) {
/*
* Add runnable; clip at LOAD_AVG_MAX. Reflects that until
* the CPU is saturated running == runnable.
*/
runnable_sum += se->avg.load_sum;
runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
} else {
/*
* Estimate the new unweighted runnable_sum of the gcfs_rq by
* assuming all tasks are equally runnable.
*/
if (scale_load_down(gcfs_rq->load.weight)) {
load_sum = div_s64(gcfs_rq->avg.load_sum,
scale_load_down(gcfs_rq->load.weight));
}
/* But make sure to not inflate se's runnable */
runnable_sum = min(se->avg.load_sum, load_sum);
}
/*
* runnable_sum can't be lower than running_sum
* Rescale running sum to be in the same range as runnable sum
* running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
* runnable_sum is in [0 : LOAD_AVG_MAX]
*/
running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
runnable_sum = max(runnable_sum, running_sum);
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, LOAD_AVG_MAX);
delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
delta_avg = load_avg - se->avg.load_avg;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
add_positive(&cfs_rq->avg.load_avg, delta_avg);
add_positive(&cfs_rq->avg.load_sum, delta_sum);
runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
se->avg.runnable_load_sum = runnable_sum;
se->avg.runnable_load_avg = runnable_load_avg;
if (se->on_rq) {
add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
}
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
{
cfs_rq->propagate = 1;
cfs_rq->prop_runnable_sum += runnable_sum;
}
/* Update task and its cfs_rq load average */
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq, *gcfs_rq;
if (entity_is_task(se))
return 0;
gcfs_rq = group_cfs_rq(se);
if (!gcfs_rq->propagate)
return 0;
gcfs_rq->propagate = 0;
cfs_rq = cfs_rq_of(se);
add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
update_tg_cfs_util(cfs_rq, se, gcfs_rq);
update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
trace_sched_load_cfs_rq(cfs_rq);
trace_sched_load_se(se);
return 1;
}
/*
* Check if we need to update the load and the utilization of a blocked
* group_entity:
*/
static inline bool skip_blocked_update(struct sched_entity *se)
{
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
/*
 * If the sched_entity still has non-zero load or utilization, we have to
* decay it:
*/
if (se->avg.load_avg || se->avg.util_avg)
return false;
/*
* If there is a pending propagation, we have to update the load and
* the utilization of the sched_entity:
*/
if (gcfs_rq->propagate)
return false;
/*
 * Otherwise, the load and the utilization of the sched_entity are
* already zero and there is no pending propagation, so it will be a
* waste of time to try to decay it:
*/
return true;
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
return 0;
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
* Remove and clamp on negative, from a local variable.
*
* A variant of sub_positive(), which does not use explicit load-store
* and is thus optimized for local variable updates.
*/
#define lsub_positive(_ptr, _val) do { \
typeof(_ptr) ptr = (_ptr); \
*ptr -= min_t(typeof(*ptr), *ptr, _val); \
} while (0)
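/*
 * Example (hypothetical values): with *ptr == 5 and _val == 8, min_t()
 * picks 5 and the result is 0, mirroring sub_positive() but without the
 * READ_ONCE()/WRITE_ONCE() pair, since a local variable needs no
 * protection against concurrent readers.
 */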
/**
* update_cfs_rq_load_avg - update the cfs_rq's load/util averages
* @now: current time, as per cfs_rq_clock_pelt()
* @cfs_rq: cfs_rq to update
*
* The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
* avg. The immediate corollary is that all (fair) tasks must be attached, see
* post_init_entity_util_avg().
*
* cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
*
* Returns true if the load decayed or we removed load.
*
* Since both these conditions indicate a changed cfs_rq->avg.load we should
* call update_tg_load_avg() when this function returns true.
*/
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
struct sched_avg *sa = &cfs_rq->avg;
int decayed = 0;
if (cfs_rq->removed.nr) {
unsigned long r;
u32 divider = get_pelt_divider(&cfs_rq->avg);
raw_spin_lock(&cfs_rq->removed.lock);
swap(cfs_rq->removed.util_avg, removed_util);
swap(cfs_rq->removed.load_avg, removed_load);
swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
cfs_rq->removed.nr = 0;
raw_spin_unlock(&cfs_rq->removed.lock);
r = removed_load;
sub_positive(&sa->load_avg, r);
sub_positive(&sa->load_sum, r * divider);
r = removed_util;
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * divider);
add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
decayed = 1;
}
decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
u64_u32_store_copy(sa->last_update_time,
cfs_rq->last_update_time_copy,
sa->last_update_time);
if (decayed)
cfs_rq_util_change(cfs_rq);
return decayed;
}
/**
* attach_entity_load_avg - attach this entity to its cfs_rq load avg
* @cfs_rq: cfs_rq to attach to
* @se: sched_entity to attach
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
* See ___update_load_avg() for details.
*/
u32 divider = get_pelt_divider(&cfs_rq->avg);
/*
* When we attach the @se to the @cfs_rq, we must align the decay
* window because without that, really weird and wonderful things can
* happen.
*
* XXX illustrate
*/
se->avg.last_update_time = cfs_rq->avg.last_update_time;
se->avg.period_contrib = cfs_rq->avg.period_contrib;
/*
* Hell(o) Nasty stuff.. we need to recompute _sum based on the new
* period_contrib. This isn't strictly correct, but since we're
* entirely outside of the PELT hierarchy, nobody cares if we truncate
* _sum a little.
*/
se->avg.util_sum = se->avg.util_avg * divider;
se->avg.load_sum = divider;
if (se_weight(se)) {
se->avg.load_sum =
div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
}
se->avg.runnable_load_sum = se->avg.load_sum;
enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
cfs_rq_util_change(cfs_rq);
trace_sched_load_cfs_rq(cfs_rq);
}
/**
* detach_entity_load_avg - detach this entity from its cfs_rq load avg
* @cfs_rq: cfs_rq to detach from
* @se: sched_entity to detach
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
cfs_rq_util_change(cfs_rq);
trace_sched_load_cfs_rq(cfs_rq);
}
/*
* Optional action to be done while updating the load average
*/
#define UPDATE_TG 0x1
#define SKIP_AGE_LOAD 0x2
#define DO_ATTACH 0x4
#define DO_DETACH 0x8
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 now = cfs_rq_clock_pelt(cfs_rq);
int decayed;
/*
 * Track task load average for carrying it to a new CPU after migration, and
 * track group sched_entity load average for the task_h_load() calculation in migration.
*/
if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
__update_load_avg_se(now, cfs_rq, se);
decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se);
if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, 0);
} else if (flags & DO_DETACH) {
/*
* DO_DETACH means we're here from dequeue_entity()
* and we are migrating task out of the CPU.
*/
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, 0);
} else if (decayed && (flags & UPDATE_TG))
update_tg_load_avg(cfs_rq, 0);
}
/*
* Synchronize entity load avg of dequeued entity without locking
* the previous rq.
*/
void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
last_update_time = cfs_rq_last_update_time(cfs_rq);
__update_load_avg_blocked_se(last_update_time, se);
}
/*
 * Task first catches up with cfs_rq, and then subtracts
* itself from the cfs_rq (task must be off the queue now).
*/
void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
unsigned long flags;
/*
* tasks cannot exit without having gone through wake_up_new_task() ->
* post_init_entity_util_avg() which will have added things to the
* cfs_rq, so we can remove unconditionally.
*
* Similarly for groups, they will have passed through
* post_init_entity_util_avg() before unregister_sched_fair_group()
* calls this.
*/
sync_entity_load_avg(se);
raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
++cfs_rq->removed.nr;
cfs_rq->removed.util_avg += se->avg.util_avg;
cfs_rq->removed.load_avg += se->avg.load_avg;
cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */
raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
}
static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
{
return cfs_rq->avg.runnable_load_avg;
}
static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
return cfs_rq->avg.load_avg;
}
static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
static inline bool task_fits_capacity(struct task_struct *p, long capacity);
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
if (!static_branch_unlikely(&sched_asym_cpucapacity))
return;
if (!p) {
rq->misfit_task_load = 0;
return;
}
if (task_fits_max(p, cpu_of(rq))) {
rq->misfit_task_load = 0;
return;
}
rq->misfit_task_load = task_h_load(p);
}
static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
}
unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
return p->ravg.demand_scaled;
#endif
return max(task_util(p), _task_util_est(p));
}
#ifdef CONFIG_UCLAMP_TASK
static inline unsigned long uclamp_task_util(struct task_struct *p)
{
return clamp(task_util_est(p),
uclamp_eff_value(p, UCLAMP_MIN),
uclamp_eff_value(p, UCLAMP_MAX));
}
#else
static inline unsigned long uclamp_task_util(struct task_struct *p)
{
return task_util_est(p);
}
#endif
static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
struct task_struct *p)
{
unsigned int enqueued;
if (!sched_feat(UTIL_EST))
return;
/* Update root cfs_rq's estimated utilization */
enqueued = cfs_rq->avg.util_est.enqueued;
enqueued += _task_util_est(p);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
trace_sched_util_est_task(p, &p->se.avg);
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
/*
* Check if a (signed) value is within a specified (unsigned) margin,
* based on the observation that:
*
* abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
*
 * NOTE: this only works when value + margin < INT_MAX.
*/
static inline bool within_margin(int value, int margin)
{
return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}
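/*
 * Worked example of the identity above (hypothetical margin of 10):
 * value == -3 gives (unsigned)(-3 + 9) == 6 < 19, so true (abs(-3) < 10);
 * value == 12 gives 21, which is not < 19, so false. For value == -15 the
 * unsigned cast wraps -6 to a huge number, correctly rejecting it, where
 * a signed compare of -6 < 19 would have wrongly accepted it.
 */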
static void
util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
{
long last_ewma_diff, last_enqueued_diff;
struct util_est ue;
int cpu;
if (!sched_feat(UTIL_EST))
return;
/* Update root cfs_rq's estimated utilization */
ue.enqueued = cfs_rq->avg.util_est.enqueued;
ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
*/
if (!task_sleep)
return;
/*
* If the PELT values haven't changed since enqueue time,
* skip the util_est update.
*/
ue = p->se.avg.util_est;
if (ue.enqueued & UTIL_AVG_UNCHANGED)
return;
last_enqueued_diff = ue.enqueued;
/*
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
goto done;
}
}
/*
 * Skip update of task's estimated utilization when its members are
 * already within ~1% of its last activation value.
*/
last_ewma_diff = ue.enqueued - ue.ewma;
last_enqueued_diff -= ue.enqueued;
if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
goto done;
return;
}
/*
* To avoid overestimation of actual task utilization, skip updates if
 * we cannot guarantee there is idle time on this CPU.
*/
cpu = cpu_of(rq_of(cfs_rq));
if (task_util(p) > capacity_orig_of(cpu))
return;
/*
* Update Task's estimated utilization
*
* When *p completes an activation we can consolidate another sample
* of the task size. This is done by storing the current PELT value
* as ue.enqueued and by using this value to update the Exponential
* Weighted Moving Average (EWMA):
*
* ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
* = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
* = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
* = w * ( last_ewma_diff ) + ewma(t-1)
* = w * (last_ewma_diff + ewma(t-1) / w)
*
* Where 'w' is the weight of new samples, which is configured to be
* 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
*/
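/*
 * Worked example (illustrative numbers, assuming UTIL_EST_WEIGHT_SHIFT
 * is 2, i.e. w = 1/4 as described above): with ewma(t-1) = 200 and a new
 * sample of 280, last_ewma_diff = 80 and the shift sequence below yields
 * ((200 << 2) + 80) >> 2 = 220, i.e. ewma(t-1) + last_ewma_diff / 4.
 */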
ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
WRITE_ONCE(p->se.avg.util_est, ue);
trace_sched_util_est_task(p, &p->se.avg);
}
#else /* CONFIG_SMP */
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
#define DO_DETACH 0x0
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
cfs_rq_util_change(cfs_rq);
}
static inline void remove_entity_load_avg(struct sched_entity *se) {}
static inline void
attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
{
return 0;
}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
bool task_sleep) {}
#endif /* CONFIG_SMP */
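/*
 * An entity is considered a "long sleeper" when its last exec_start is more
 * than (1 << 63) / NICE_0_LOAD (scaled down) nanoseconds in the past, i.e.
 * roughly 2^53 ns, on the order of a hundred days. Such a stale timestamp
 * risks overflowing signed 64-bit vruntime arithmetic if used as-is.
 */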
static inline bool entity_is_long_sleeper(struct sched_entity *se)
{
struct cfs_rq *cfs_rq;
u64 sleep_time;
if (se->exec_start == 0)
return false;
cfs_rq = cfs_rq_of(se);
sleep_time = rq_clock_task(rq_of(cfs_rq));
/* Can happen while migrating because of clock task divergence */
if (sleep_time <= se->exec_start)
return false;
sleep_time -= se->exec_start;
if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
return true;
return false;
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 vslice, vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
se->slice = sysctl_sched_base_slice;
vslice = calc_delta_fair(se->slice, se);
/*
* Due to how V is constructed as the weighted average of entities,
* adding tasks with positive lag, or removing tasks with negative lag
* will move 'time' backwards, this can screw around with the lag of
* other tasks.
*
* EEVDF: placement strategy #1 / #2
*/
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
lag = se->vlag;
/*
* If we want to place a task and preserve lag, we have to
* consider the effect of the new entity on the weighted
* average and compensate for this, otherwise lag can quickly
* evaporate.
*
* Lag is defined as:
*
* lag_i = S - s_i = w_i * (V - v_i)
*
* To avoid the 'w_i' term all over the place, we only track
* the virtual lag:
*
* vl_i = V - v_i <=> v_i = V - vl_i
*
* And we take V to be the weighted average of all v:
*
* V = (\Sum w_j*v_j) / W
*
* Where W is: \Sum w_j
*
* Then, the weighted average after adding an entity with lag
* vl_i is given by:
*
* V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
* = (W*V + w_i*(V - vl_i)) / (W + w_i)
* = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
* = (V*(W + w_i) - w_i*l) / (W + w_i)
* = V - w_i*vl_i / (W + w_i)
*
* And the actual lag after adding an entity with vl_i is:
*
* vl'_i = V' - v_i
* = V - w_i*vl_i / (W + w_i) - (V - vl_i)
* = vl_i - w_i*vl_i / (W + w_i)
*
* Which is strictly less than vl_i. So in order to preserve lag
* we should inflate the lag before placement such that the
* effective lag after placement comes out right.
*
* As such, invert the above relation for vl'_i to get the vl_i
* we need to use such that the lag after placement is the lag
* we computed before dequeue.
*
* vl'_i = vl_i - w_i*vl_i / (W + w_i)
* = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
*
* (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
* = W*vl_i
*
* vl_i = (W + w_i)*vl'_i / W
*/
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
load += scale_load_down(curr->load.weight);
lag *= load + scale_load_down(se->load.weight);
if (WARN_ON_ONCE(!load))
load = 1;
lag = div_s64(lag, load);
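/*
 * Illustrative numbers only: with W = 2048 (scaled-down), an incoming
 * weight w_i = 1024 and a stored vl'_i = 300, the inflated lag computed
 * here is (2048 + 1024) * 300 / 2048 = 450; re-averaging after placement
 * gives 450 - 1024 * 450 / 3072 = 300, recovering the lag that was
 * computed at dequeue time.
 */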
}
se->vruntime = vruntime - lag;
/*
 * When joining the competition, the existing tasks will be,
* on average, halfway through their slice, as such start tasks
* off with half a slice to ease into the competition.
*/
if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
vslice /= 2;
/*
* EEVDF: vd_i = ve_i + r_i/w_i
*/
se->deadline = se->vruntime + vslice;
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
static inline void check_schedstat_required(void)
{
#ifdef CONFIG_SCHEDSTATS
if (schedstat_enabled())
return;
/* Force schedstat enabled if a dependent tracepoint is active */
if (trace_sched_stat_wait_enabled() ||
trace_sched_stat_sleep_enabled() ||
trace_sched_stat_iowait_enabled() ||
trace_sched_stat_blocked_enabled() ||
trace_sched_stat_runtime_enabled()) {
printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
"stat_blocked and stat_runtime require the "
"kernel parameter schedstats=enable or "
"kernel.sched_schedstats=1\n");
}
#endif
}
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool curr = cfs_rq->curr == se;
/*
* If we're the current task, we must renormalise before calling
* update_curr().
*/
if (curr)
place_entity(cfs_rq, se, flags);
update_curr(cfs_rq);
/*
* When enqueuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - Add its load to cfs_rq->runnable_avg
* - For group_entity, update its weight to reflect the new share of
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
*/
update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
enqueue_runnable_load_avg(cfs_rq, se);
/*
* XXX update_load_avg() above will have attached us to the pelt sum;
* but update_cfs_group() here will re-adjust the weight and have to
* undo/redo all that. Seems wasteful.
*/
update_cfs_group(se);
/*
 * XXX now that the entity has been re-weighted, and its lag adjusted,
* we can place the entity.
*/
if (!curr)
place_entity(cfs_rq, se, flags);
account_entity_enqueue(cfs_rq, se);
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
/* Entity has migrated, no longer consider this task hot */
if (flags & ENQUEUE_MIGRATED)
se->exec_start = 0;
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
if (!curr)
__enqueue_entity(cfs_rq, se);
se->on_rq = 1;
if (cfs_rq->nr_running == 1) {
list_add_leaf_cfs_rq(cfs_rq);
check_enqueue_throttle(cfs_rq);
}
}
static void __clear_buddies_next(struct sched_entity *se)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (cfs_rq->next != se)
break;
cfs_rq->next = NULL;
}
}
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->next == se)
__clear_buddies_next(se);
}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
action |= DO_DETACH;
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
 * - Subtract its load from the cfs_rq->runnable_avg.
 * - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
update_load_avg(cfs_rq, se, action);
dequeue_runnable_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se, flags);
clear_buddies(cfs_rq, se);
update_entity_lag(cfs_rq, se);
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
update_cfs_group(se);
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
* put back on, and if we advance min_vruntime, we'll be placed back
* further than we started -- ie. we'll be penalized.
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
}
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
clear_buddies(cfs_rq, se);
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
 * Any task has to be enqueued before it gets to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
/*
* HACK, stash a copy of deadline at the point of pick in vlag,
* which isn't used until dequeue.
*/
se->vlag = se->deadline;
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
/*
* Track our maximum slice length, if the CPU's load is at
 * least twice that of our own weight (i.e. don't track it
* when there are only lesser-weight tasks around):
*/
if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
/*
* Pick the next process, keeping these things in mind, in this order:
* 1) keep things fair between processes/task groups
* 2) pick the "next" process, since someone really wants that to run
* 3) pick the "last" process, for cache locality
* 4) do not run the "skip" process, if something else is available
*/
static struct sched_entity *
pick_next_entity(struct cfs_rq *cfs_rq)
{
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
if (sched_feat(NEXT_BUDDY) &&
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
return pick_eevdf(cfs_rq);
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
/*
* If still on the runqueue then deactivate_task()
* was not called and update_curr() has to be done:
*/
if (prev->on_rq)
update_curr(cfs_rq);
/* throttle cfs_rqs exceeding runtime */
check_cfs_rq_runtime(cfs_rq);
if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
update_load_avg(cfs_rq, prev, 0);
}
cfs_rq->curr = NULL;
}
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* Ensure that runnable average is periodically updated.
*/
update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_group(curr);
#ifdef CONFIG_SCHED_HRTICK
/*
* queued ticks are scheduled to match the slice, so don't bother
* validating it and just reschedule.
*/
if (queued) {
resched_curr(rq_of(cfs_rq));
return;
}
/*
* don't let the period tick interfere with the hrtick preemption
*/
if (!sched_feat(DOUBLE_TICK) &&
hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
return;
#endif
}
/**************************************************
* CFS bandwidth control machinery
*/
#ifdef CONFIG_CFS_BANDWIDTH
#ifdef HAVE_JUMP_LABEL
static struct static_key __cfs_bandwidth_used;
static inline bool cfs_bandwidth_used(void)
{
return static_key_false(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_inc(void)
{
static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_dec(void)
{
static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
{
return true;
}
void cfs_bandwidth_usage_inc(void) {}
void cfs_bandwidth_usage_dec(void) {}
#endif /* HAVE_JUMP_LABEL */
/*
* default period for cfs group bandwidth.
* default: 0.1s, units: nanoseconds
*/
static inline u64 default_cfs_period(void)
{
return 100000000ULL;
}
static inline u64 sched_cfs_bandwidth_slice(void)
{
return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
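/*
 * Putting period, quota and slice together (illustrative values): with the
 * default 100ms period and a 50ms quota, a group may consume at most 50ms
 * of CPU time per period; that quota is handed out to individual cfs_rqs in
 * slices of sysctl_sched_cfs_bandwidth_slice (5ms by default, unless tuned)
 * via __assign_cfs_rq_runtime() below.
 */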
/*
* Replenish runtime according to assigned quota. We use sched_clock_cpu
* directly instead of rq->clock to avoid adding additional synchronization
* around rq->lock.
*
* requires cfs_b->lock
*/
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
if (cfs_b->quota != RUNTIME_INF)
cfs_b->runtime = cfs_b->quota;
}
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return &tg->cfs_bandwidth;
}
/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
if (unlikely(cfs_rq->throttle_count))
return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
/* returns 0 on failure to allocate runtime */
static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
struct cfs_rq *cfs_rq, u64 target_runtime)
{
u64 min_amount, amount = 0;
lockdep_assert_held(&cfs_b->lock);
/* note: this is a positive sum as runtime_remaining <= 0 */
min_amount = target_runtime - cfs_rq->runtime_remaining;
if (cfs_b->quota == RUNTIME_INF)
amount = min_amount;
else {
start_cfs_bandwidth(cfs_b);
if (cfs_b->runtime > 0) {
amount = min(cfs_b->runtime, min_amount);
cfs_b->runtime -= amount;
cfs_b->idle = 0;
}
}
cfs_rq->runtime_remaining += amount;
return cfs_rq->runtime_remaining > 0;
}
/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
int ret;
raw_spin_lock(&cfs_b->lock);
ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
raw_spin_unlock(&cfs_b->lock);
return ret;
}
static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
/* dock delta_exec before expiring quota (as it could span periods) */
cfs_rq->runtime_remaining -= delta_exec;
if (likely(cfs_rq->runtime_remaining > 0))
return;
if (cfs_rq->throttled)
return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
resched_curr(rq_of(cfs_rq));
}
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
__account_cfs_rq_runtime(cfs_rq, delta_exec);
}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->throttled;
}
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->throttle_count;
}
/*
* Ensure that neither of the group entities corresponding to src_cpu or
* dest_cpu are members of a throttled hierarchy when performing group
* load-balance operations.
*/
static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
src_cfs_rq = tg->cfs_rq[src_cpu];
dest_cfs_rq = tg->cfs_rq[dest_cpu];
return throttled_hierarchy(src_cfs_rq) ||
throttled_hierarchy(dest_cfs_rq);
}
/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
struct rq *rq = data;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
cfs_rq->throttle_count--;
if (!cfs_rq->throttle_count) {
/* adjust cfs_rq_clock_task() */
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
cfs_rq->throttled_clock_pelt;
/* Add cfs_rq with already running entity in the list */
if (cfs_rq->nr_running >= 1)
list_add_leaf_cfs_rq(cfs_rq);
}
return 0;
}
static int tg_throttle_down(struct task_group *tg, void *data)
{
struct rq *rq = data;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
/* group is entering throttled state, stop time */
if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
list_del_leaf_cfs_rq(cfs_rq);
}
cfs_rq->throttle_count++;
return 0;
}
static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta, dequeue = 1;
raw_spin_lock(&cfs_b->lock);
/* This will start the period timer if necessary */
if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
/*
* We have raced with bandwidth becoming available, and if we
* actually throttled the timer might not unthrottle us for an
* entire period. We additionally needed to make sure that any
* subsequent check_cfs_rq_runtime calls agree not to throttle
* us, as we may commit to do cfs put_prev+pick_next, so we ask
* for 1ns of runtime rather than just check cfs_b.
*/
dequeue = 0;
} else {
list_add_tail_rcu(&cfs_rq->throttled_list,
&cfs_b->throttled_cfs_rq);
}
raw_spin_unlock(&cfs_b->lock);
if (!dequeue)
return false; /* Throttle no longer required. */
se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
/* freeze hierarchy runnable averages while throttled */
rcu_read_lock();
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
rcu_read_unlock();
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
break;
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
qcfs_rq->idle_h_nr_running -= idle_task_delta;
walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
if (qcfs_rq->load.weight)
dequeue = 0;
}
if (!se) {
sub_nr_running(rq, task_delta);
walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
}
/*
* Note: distribution will already see us throttled via the
* throttled-list. rq->lock protects completion.
*/
cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
return true;
}
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta;
struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
se = cfs_rq->tg->se[cpu_of(rq)];
cfs_rq->throttled = 0;
update_rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
list_del_rcu(&cfs_rq->throttled_list);
raw_spin_unlock(&cfs_b->lock);
/* update hierarchical throttle state */
walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
if (!cfs_rq->load.weight)
return;
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
cfs_rq->idle_h_nr_running += idle_task_delta;
walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto unthrottle_throttle;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
update_load_avg(cfs_rq, se, UPDATE_TG);
se_update_runnable(se);
cfs_rq->h_nr_running += task_delta;
cfs_rq->idle_h_nr_running += idle_task_delta;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
goto unthrottle_throttle;
/*
* One parent has been throttled and cfs_rq removed from the
* list. Add it back to not break the leaf list.
*/
if (throttled_hierarchy(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
}
unthrottle_throttle:
/*
* The cfs_rq_throttled() breaks in the above iteration can result in
* incomplete leaf list maintenance, resulting in triggering the
* assertion below.
*/
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
if (list_add_leaf_cfs_rq(cfs_rq))
break;
}
assert_list_leaf_cfs_rq(rq);
if (!se) {
add_nr_running(rq, task_delta);
walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
}
/* determine whether we need to wake up potentially idle cpu */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
}
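/*
 * Walk the list of throttled cfs_rqs and hand each one just enough runtime
 * (its deficit plus 1ns) to unthrottle, stopping once cfs_b->runtime is
 * exhausted.
 */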
static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
{
struct cfs_rq *cfs_rq;
u64 runtime, remaining = 1;
rcu_read_lock();
list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
throttled_list) {
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
rq_lock(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
/* By the above check, this should never be true */
SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
raw_spin_lock(&cfs_b->lock);
runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > cfs_b->runtime)
runtime = cfs_b->runtime;
cfs_b->runtime -= runtime;
remaining = cfs_b->runtime;
raw_spin_unlock(&cfs_b->lock);
cfs_rq->runtime_remaining += runtime;
/* we check whether we're throttled above */
if (cfs_rq->runtime_remaining > 0)
unthrottle_cfs_rq(cfs_rq);
next:
rq_unlock(rq, &rf);
if (!remaining)
break;
}
rcu_read_unlock();
}
/*
* Responsible for refilling a task_group's bandwidth and unthrottling its
* cfs_rqs as appropriate. If there has been no activity within the last
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
int throttled;
/* no need to continue the timer with no bandwidth constraint */
if (cfs_b->quota == RUNTIME_INF)
goto out_deactivate;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->nr_periods += overrun;
/*
* idle depends on !throttled (for the case of a large deficit), and if
* we're going inactive then everything else can be deferred
*/
if (cfs_b->idle && !throttled)
goto out_deactivate;
__refill_cfs_bandwidth_runtime(cfs_b);
if (!throttled) {
/* mark as potentially idle for the upcoming period */
cfs_b->idle = 1;
return 0;
}
/* account preceding periods in which throttling occurred */
cfs_b->nr_throttled += overrun;
/*
* This check is repeated as we release cfs_b->lock while we unthrottle.
*/
while (throttled && cfs_b->runtime > 0) {
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
distribute_cfs_runtime(cfs_b);
raw_spin_lock(&cfs_b->lock);
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
}
/*
* While we are ensured activity in the period following an
* unthrottle, this also covers the case in which the new bandwidth is
* insufficient to cover the existing bandwidth deficit. (Forcing the
* timer to remain active while there are any throttled entities.)
*/
cfs_b->idle = 0;
return 0;
out_deactivate:
return 1;
}
/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
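/*
 * Taken together (for illustration): a cfs_rq keeps at least 1ms of runtime
 * for itself when returning slack, the returned runtime is redistributed
 * after a 5ms gather window, and the deferred distribution bails out if the
 * next period refresh is less than 2ms away.
 */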
/*
* Are we near the end of the current quota period?
*
* Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
* hrtimer base being cleared by hrtimer_start. In the case of
* migrate_hrtimers, base is never cleared, so we are fine.
*/
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
struct hrtimer *refresh_timer = &cfs_b->period_timer;
s64 remaining;
/* if the call-back is running a quota refresh is already occurring */
if (hrtimer_callback_running(refresh_timer))
return 1;
/* is a quota refresh about to occur? */
remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
if (remaining < (s64)min_expire)
return 1;
return 0;
}
static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
/* if there's a quota refresh soon don't bother with slack */
if (runtime_refresh_within(cfs_b, min_left))
return;
/* don't push forwards an existing deferred unthrottle */
if (cfs_b->slack_started)
return;
cfs_b->slack_started = true;
hrtimer_start(&cfs_b->slack_timer,
ns_to_ktime(cfs_bandwidth_slack_period),
HRTIMER_MODE_REL);
}
/* we know any runtime found here is valid as update_curr() precedes return */
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
if (slack_runtime <= 0)
return;
raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF) {
cfs_b->runtime += slack_runtime;
/* we are under rq->lock, defer unthrottling using a timer */
if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
!list_empty(&cfs_b->throttled_cfs_rq))
start_cfs_slack_bandwidth(cfs_b);
}
raw_spin_unlock(&cfs_b->lock);
/* even if it's not valid for return we don't want to try again */
cfs_rq->runtime_remaining -= slack_runtime;
}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return;
if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
}
/*
* This is done with a timer (instead of inline with bandwidth return) since
* it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
*/
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
/* confirm we're still not at a refresh boundary */
raw_spin_lock(&cfs_b->lock);
cfs_b->slack_started = false;
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock(&cfs_b->lock);
return;
}
if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
runtime = cfs_b->runtime;
raw_spin_unlock(&cfs_b->lock);
if (!runtime)
return;
distribute_cfs_runtime(cfs_b);
raw_spin_lock(&cfs_b->lock);
raw_spin_unlock(&cfs_b->lock);
}
/*
* When a group wakes up we want to make sure that its quota is not already
* expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
*/
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return;
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
/* ensure the group is not already throttled */
if (cfs_rq_throttled(cfs_rq))
return;
/* update runtime allocation */
account_cfs_rq_runtime(cfs_rq, 0);
if (cfs_rq->runtime_remaining <= 0)
throttle_cfs_rq(cfs_rq);
}
static void sync_throttle(struct task_group *tg, int cpu)
{
struct cfs_rq *pcfs_rq, *cfs_rq;
if (!cfs_bandwidth_used())
return;
if (!tg->parent)
return;
cfs_rq = tg->cfs_rq[cpu];
pcfs_rq = tg->parent->cfs_rq[cpu];
cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
if (!cfs_bandwidth_used())
return false;
if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
return false;
/*
* it's possible for a throttled entity to be forced into a running
* state (e.g. set_curr_task), in this case we're finished.
*/
if (cfs_rq_throttled(cfs_rq))
return true;
return throttle_cfs_rq(cfs_rq);
}
static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, slack_timer);
do_sched_cfs_slack_timer(cfs_b);
return HRTIMER_NORESTART;
}
extern const u64 max_cfs_quota_period;
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
int overrun;
int idle = 0;
int count = 0;
raw_spin_lock(&cfs_b->lock);
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
break;
if (++count > 3) {
u64 new, old = ktime_to_ns(cfs_b->period);
/*
* Grow period by a factor of 2 to avoid losing precision.
* Precision loss in the quota/period ratio can cause __cfs_schedulable
* to fail.
*/
new = old * 2;
if (new < max_cfs_quota_period) {
cfs_b->period = ns_to_ktime(new);
cfs_b->quota *= 2;
pr_warn_ratelimited(
"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
smp_processor_id(),
div_u64(new, NSEC_PER_USEC),
div_u64(cfs_b->quota, NSEC_PER_USEC));
} else {
pr_warn_ratelimited(
"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
smp_processor_id(),
div_u64(old, NSEC_PER_USEC),
div_u64(cfs_b->quota, NSEC_PER_USEC));
}
/* reset count so we don't come right back in here */
count = 0;
}
idle = do_sched_cfs_period_timer(cfs_b, overrun);
}
if (idle)
cfs_b->period_active = 0;
raw_spin_unlock(&cfs_b->lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
raw_spin_lock_init(&cfs_b->lock);
cfs_b->runtime = 0;
cfs_b->quota = RUNTIME_INF;
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
cfs_b->period_timer.function = sched_cfs_period_timer;
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
cfs_b->slack_started = false;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
walt_init_cfs_rq_stats(cfs_rq);
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
u64 overrun;
lockdep_assert_held(&cfs_b->lock);
if (cfs_b->period_active)
return;
cfs_b->period_active = 1;
overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
cfs_b->expires_seq++;
hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
}
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
/* init_cfs_bandwidth() was not called */
if (!cfs_b->throttled_cfs_rq.next)
return;
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
}
/*
* Both these cpu hotplug callbacks race against unregister_fair_sched_group()
*
* The race is harmless, since modifying bandwidth settings of unhooked group
* bits doesn't do much.
*/
/* cpu online callback */
static void __maybe_unused update_runtime_enabled(struct rq *rq)
{
struct task_group *tg;
lockdep_assert_held(&rq->lock);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
raw_spin_lock(&cfs_b->lock);
cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
raw_spin_unlock(&cfs_b->lock);
}
rcu_read_unlock();
}
/* cpu offline callback */
static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
{
struct task_group *tg;
lockdep_assert_held(&rq->lock);
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
if (!cfs_rq->runtime_enabled)
continue;
/*
* clock_task is not advancing so we just need to make sure
* there's some valid quota amount
*/
cfs_rq->runtime_remaining = 1;
/*
* Offline rq is schedulable till cpu is completely disabled
* in take_cpu_down(), so we prevent new cfs throttling here.
*/
cfs_rq->runtime_enabled = 0;
if (cfs_rq_throttled(cfs_rq))
unthrottle_cfs_rq(cfs_rq);
}
rcu_read_unlock();
}
#else /* CONFIG_CFS_BANDWIDTH */
static inline bool cfs_bandwidth_used(void)
{
return false;
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
return rq_clock_task(rq_of(cfs_rq));
}
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static inline void sync_throttle(struct task_group *tg, int cpu) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
return 0;
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
return 0;
}
static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
return 0;
}
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return NULL;
}
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
#endif /* CONFIG_CFS_BANDWIDTH */
/**************************************************
* CFS operations on tasks:
*/
#ifdef CONFIG_SCHED_HRTICK
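/*
 * Arm the high-resolution tick so the task is preempted when its slice
 * expires rather than at the next periodic tick. For example, if the slice
 * is 3ms and the task has already run for 1ms, the hrtimer is set to fire
 * in 2ms; if the slice is already exhausted, reschedule immediately.
 */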
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
SCHED_WARN_ON(task_rq(p) != rq);
if (rq->cfs.h_nr_running > 1) {
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
u64 slice = se->slice;
s64 delta = slice - ran;
if (delta < 0) {
if (rq->curr == p)
resched_curr(rq);
return;
}
hrtick_start(rq, delta);
}
}
/*
* called from enqueue/dequeue and updates the hrtick when the
* current task is from our class and nr_running is low enough
* to matter.
*/
static void hrtick_update(struct rq *rq)
{
struct task_struct *curr = rq->curr;
if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
return;
hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
static inline void hrtick_update(struct rq *rq)
{
}
#endif
#ifdef CONFIG_SMP
static bool sd_overutilized(struct sched_domain *sd)
{
return sd->shared->overutilized;
}
static void set_sd_overutilized(struct sched_domain *sd)
{
trace_sched_overutilized(sd, sd->shared->overutilized, true);
sd->shared->overutilized = true;
}
static void clear_sd_overutilized(struct sched_domain *sd)
{
trace_sched_overutilized(sd, sd->shared->overutilized, false);
sd->shared->overutilized = false;
}
static inline void update_overutilized_status(struct rq *rq)
{
struct sched_domain *sd;
rcu_read_lock();
sd = rcu_dereference(rq->sd);
if (cpu_overutilized(rq->cpu)) {
if (sd && (sd->flags & SD_LOAD_BALANCE))
set_sd_overutilized(sd);
else if (sd && sd->parent)
set_sd_overutilized(sd->parent);
}
rcu_read_unlock();
}
#else
#define update_overutilized_status(rq) do {} while (0)
#endif /* CONFIG_SMP */
/* Runqueue only has SCHED_IDLE tasks enqueued */
static int sched_idle_rq(struct rq *rq)
{
return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
rq->nr_running);
}
static int sched_idle_cpu(int cpu)
{
return sched_idle_rq(cpu_rq(cpu));
}
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
* then put the task into the rbtree:
*/
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int task_new = !(flags & ENQUEUE_WAKEUP);
int idle_h_nr_running = task_has_idle_policy(p);
bool prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
(uclamp_latency_sensitive(p) > 0) : 0;
#ifdef CONFIG_SCHED_WALT
p->misfit = !task_fits_max(p, rq->cpu);
#endif
/*
* The code below (indirectly) updates schedutil which looks at
* the cfs_rq utilization to select a frequency.
* Let's add the task's estimated utilization to the cfs_rq's
* estimated utilization, before we update schedutil.
*/
util_est_enqueue(&rq->cfs, p);
/*
* The code below (indirectly) updates schedutil which looks at
* the cfs_rq utilization to select a frequency.
* Let's update schedtune here to ensure the boost value of the
* current task is accounted for in the selection of the OPP.
*
* We do it also in the case where we enqueue a throttled task;
* we could argue that a throttled task should not boost a CPU,
* however:
* a) properly implementing CPU boosting considering throttled
* tasks will increase a lot the complexity of the solution
* b) it's not easy to quantify the benefits introduced by
* such a more complex solution.
* Thus, for the time being we go for the simple solution and boost
* also for throttled RQs.
*/
schedtune_enqueue_task(p, cpu_of(rq));
/*
* If in_iowait is set, the code below may not trigger any cpufreq
* utilization updates, so do it here explicitly with the IOWAIT flag
* passed.
*/
if (p->in_iowait && prefer_idle)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, flags);
/*
* end evaluation on encountering a throttled cfs_rq
*
* note: in the case of encountering a throttled cfs_rq we will
* post the final h_nr_running increment below.
*/
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
walt_inc_cfs_rq_stats(cfs_rq, p);
flags = ENQUEUE_WAKEUP;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
walt_inc_cfs_rq_stats(cfs_rq, p);
if (cfs_rq_throttled(cfs_rq))
break;
update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_group(se);
}
if (!se) {
add_nr_running(rq, 1);
inc_rq_walt_stats(rq, p);
/*
* If the task prefers idle cpu, and it also is the first
* task enqueued in this runqueue, then we don't check
* overutilized. Hopefully the cpu util will be back to
* normal before next overutilized check.
*/
if (!task_new &&
!(prefer_idle && rq->nr_running == 1))
update_overutilized_status(rq);
}
if (cfs_bandwidth_used()) {
/*
* When bandwidth control is enabled; the cfs_rq_throttled()
* breaks in the above iteration can result in incomplete
* leaf list maintenance, resulting in triggering the assertion
* below.
*/
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
if (list_add_leaf_cfs_rq(cfs_rq))
break;
}
}
assert_list_leaf_cfs_rq(rq);
hrtick_update(rq);
}
static void set_next_buddy(struct sched_entity *se);
/*
* The dequeue_task method is called before nr_running is
* decreased. We remove the task from the rbtree and
* update the fair scheduling stats:
*/
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq);
/*
* The code below (indirectly) updates schedutil which looks at
* the cfs_rq utilization to select a frequency.
* Let's update schedtune here to ensure the boost value of the
* current task is not more accounted for in the selection of the OPP.
*/
schedtune_dequeue_task(p, cpu_of(rq));
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
/*
* end evaluation on encountering a throttled cfs_rq
*
* note: in the case of encountering a throttled cfs_rq we will
* post the final h_nr_running decrement below.
*/
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running--;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
walt_dec_cfs_rq_stats(cfs_rq, p);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
/*
* Bias pick_next to pick a task from this cfs_rq, as
* p is sleeping when it is within its sched_slice.
*/
if (task_sleep && se && !throttled_hierarchy(cfs_rq))
set_next_buddy(se);
break;
}
flags |= DEQUEUE_SLEEP;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
walt_dec_cfs_rq_stats(cfs_rq, p);
if (cfs_rq_throttled(cfs_rq))
break;
update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_group(se);
}
if (!se) {
sub_nr_running(rq, 1);
dec_rq_walt_stats(rq, p);
}
/* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
rq->next_balance = jiffies;
util_est_dequeue(&rq->cfs, p, task_sleep);
hrtick_update(rq);
}
#ifdef CONFIG_SMP
/* Working cpumask for: load_balance, load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
static unsigned long weighted_cpuload(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
unsigned long load_avg = weighted_cpuload(rq);
if (nr_running)
return load_avg / nr_running;
return 0;
}
static void record_wakee(struct task_struct *p)
{
/*
 * Only decay a single time; tasks that have less than 1 wakeup per
* jiffy will not have built up many flips.
*/
if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
current->wakee_flips >>= 1;
current->wakee_flip_decay_ts = jiffies;
}
if (current->last_wakee != p) {
current->last_wakee = p;
current->wakee_flips++;
}
}
/*
* Returns the current capacity of cpu after applying both
* cpu and freq scaling.
*/
unsigned long capacity_curr_of(int cpu)
{
unsigned long max_cap = cpu_rq(cpu)->cpu_capacity_orig;
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
return cap_scale(max_cap, scale_freq);
}
inline bool energy_aware(void)
{
return sysctl_sched_energy_aware;
}
/*
* Externally visible function. Let's keep the one above
* so that the check is inlined/optimized in the sched paths.
*/
bool sched_is_energy_aware(void)
{
return energy_aware();
}
/*
* __cpu_norm_util() returns the cpu util relative to a specific capacity,
 * i.e. its busy ratio, in the range [0..SCHED_CAPACITY_SCALE] which is useful
* for energy calculations. Using the scale-invariant util returned by
* cpu_util() and approximating scale-invariant util by:
*
* util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
*
* the normalized util can be found using the specific capacity.
*
* capacity = capacity_orig * curr_freq/max_freq
*
* norm_util = running_time/time ~ util/capacity
*/
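/*
 * For example (illustrative numbers): util = 256 against capacity = 512
 * gives (256 << SCHED_CAPACITY_SHIFT) / 512 = 512, i.e. a 50% busy ratio
 * on the [0..SCHED_CAPACITY_SCALE] scale.
 */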
static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
{
if (util >= capacity)
return SCHED_CAPACITY_SCALE;
return (util << SCHED_CAPACITY_SHIFT)/capacity;
}
/*
* Check whether cpu is in the fastest set of cpu's that p should run on.
* If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
* to run on any cpu.
*/
static inline bool
cpu_is_in_target_set(struct task_struct *p, int cpu)
{
struct root_domain *rd = cpu_rq(cpu)->rd;
int first_cpu = (uclamp_boosted(p)) ?
rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
int next_usable_cpu = cpumask_next(first_cpu - 1, p->cpus_ptr);
return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
}
static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
bool base_test = cpumask_test_cpu(cpu, p->cpus_ptr) &&
cpu_active(cpu) && task_fits_max(p, cpu) &&
!__cpu_overutilized(cpu, task_util(p)) &&
cpu_is_in_target_set(p, cpu);
bool rtg_test = rtg_target && cpumask_test_cpu(cpu, rtg_target);
return base_test && (!rtg_target || rtg_test);
}
/*
* CPU candidates.
*
* These are labels to reference CPU candidates for an energy_diff.
* Currently we support only two possible candidates: the task's previous CPU
 * and another candidate CPU.
* More advanced/aggressive EAS selection policies can consider more
* candidates.
*/
#define EAS_CPU_PRV 0
#define EAS_CPU_NXT 1
#define EAS_CPU_BKP 2
/*
* energy_diff - supports the computation of the estimated energy impact in
* moving a "task"'s "util_delta" between different CPU candidates.
*/
/*
* NOTE: When using or examining WALT task signals, all wakeup
* latency is included as busy time for task util.
*
* This is relevant here because:
* When debugging is enabled, it can take as much as 1ms to
* write the output to the trace buffer for each eenv
* scenario. For periodic tasks where the sleep time is of
* a similar order, the WALT task util can be inflated.
*
* Further, and even without debugging enabled,
* task wakeup latency changes depending upon the EAS
* wakeup algorithm selected - FIND_BEST_TARGET only does
* energy calculations for up to 2 candidate CPUs. When
* NO_FIND_BEST_TARGET is configured, we can potentially
* do an energy calculation across all CPUS in the system.
*
* The impact to WALT task util on a Juno board
* running a periodic task which only sleeps for 200usec
* between 1ms activations has been measured.
* (i.e. the wakeup latency induced by energy calculation
* and debug output is double the desired sleep time and
* almost equivalent to the runtime which is more-or-less
* the worst case possible for this test)
*
* In this scenario, a task which has a PELT util of around
* 220 is inflated under WALT to have util around 400.
*
* This is simply a property of the way WALT includes
* wakeup latency in busy time while PELT does not.
*
* Hence - be careful when enabling DEBUG_EENV_DECISIONS
 * especially if WALT is the task signal.
*/
/*#define DEBUG_EENV_DECISIONS*/
#ifdef DEBUG_EENV_DECISIONS
/* max of 16 levels of sched groups traversed */
#define EAS_EENV_DEBUG_LEVELS 16
struct _eenv_debug {
unsigned long cap;
unsigned long norm_util;
unsigned long cap_energy;
unsigned long idle_energy;
unsigned long this_energy;
unsigned long this_busy_energy;
unsigned long this_idle_energy;
cpumask_t group_cpumask;
unsigned long cpu_util[1];
};
#endif
struct eenv_cpu {
/* CPU ID, must be in cpus_mask */
int cpu_id;
/*
* Index (into sched_group_energy::cap_states) of the OPP the
* CPU needs to run at if the task is placed on it.
* This includes the both active and blocked load, due to
* other tasks on this CPU, as well as the task's own
* utilization.
*/
int cap_idx;
int cap;
/* Estimated system energy */
unsigned long energy;
/* Estimated energy variation wrt EAS_CPU_PRV */
long nrg_delta;
#ifdef DEBUG_EENV_DECISIONS
struct _eenv_debug *debug;
int debug_idx;
#endif /* DEBUG_EENV_DECISIONS */
};
struct energy_env {
/* Utilization to move */
struct task_struct *p;
unsigned long util_delta;
unsigned long util_delta_boosted;
/* Mask of CPUs candidates to evaluate */
cpumask_t cpus_mask;
/* CPU candidates to evaluate */
struct eenv_cpu *cpu;
int eenv_cpu_count;
#ifdef DEBUG_EENV_DECISIONS
/* pointer to the memory block reserved
* for debug on this CPU - there will be
* sizeof(struct _eenv_debug) *
* (EAS_CPU_CNT * EAS_EENV_DEBUG_LEVELS)
* bytes allocated here.
*/
struct _eenv_debug *debug;
#endif
/*
 * Index (into energy_env::cpu) of the most energy efficient CPU for
* the specified energy_env::task
*/
int next_idx;
int max_cpu_count;
/* Support data */
struct sched_group *sg_top;
struct sched_group *sg_cap;
struct sched_group *sg;
};
/*
* cpu_util_without: compute cpu utilization without any contributions from *p
* @cpu: the CPU which utilization is requested
* @p: the task which utilization should be discounted
*
* The utilization of a CPU is defined by the utilization of tasks currently
* enqueued on that CPU as well as tasks which are currently sleeping after an
* execution on that CPU.
*
* This method returns the utilization of the specified CPU by discounting the
* utilization of the specified task, whenever the task is currently
* contributing to the CPU utilization.
*/
static unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
struct cfs_rq *cfs_rq;
unsigned int util;
#ifdef CONFIG_SCHED_WALT
/*
* WALT does not decay idle tasks in the same manner
* as PELT, so it makes little sense to subtract task
* utilization from cpu utilization. Instead just use
* cpu_util for this case.
*/
if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) &&
p->state == TASK_WAKING)
return cpu_util(cpu);
#endif
/* Task has no contribution or is new */
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
return cpu_util(cpu);
#ifdef CONFIG_SCHED_WALT
util = max_t(long, cpu_util(cpu) - task_util(p), 0);
#else
cfs_rq = &cpu_rq(cpu)->cfs;
util = READ_ONCE(cfs_rq->avg.util_avg);
/* Discount task's util from CPU's util */
lsub_positive(&util, task_util(p));
/*
* Covered cases:
*
* a) if *p is the only task sleeping on this CPU, then:
* cpu_util (== task_util) > util_est (== 0)
* and thus we return:
* cpu_util_without = (cpu_util - task_util) = 0
*
* b) if other tasks are SLEEPING on this CPU, which is now exiting
* IDLE, then:
* cpu_util >= task_util
* cpu_util > util_est (== 0)
* and thus we discount *p's blocked utilization to return:
* cpu_util_without = (cpu_util - task_util) >= 0
*
* c) if other tasks are RUNNABLE on that CPU and
* util_est > cpu_util
* then we use util_est since it returns a more restrictive
* estimation of the spare capacity on that CPU, by just
* considering the expected utilization of tasks already
* runnable on that CPU.
*
* Cases a) and b) are covered by the above code, while case c) is
* covered by the following code when estimated utilization is
* enabled.
*/
if (sched_feat(UTIL_EST)) {
unsigned int estimated =
READ_ONCE(cfs_rq->avg.util_est.enqueued);
/*
* Despite the following checks we still have a small window
* for a possible race, when an execl's select_task_rq_fair()
* races with LB's detach_task():
*
* detach_task()
* p->on_rq = TASK_ON_RQ_MIGRATING;
* ---------------------------------- A
* deactivate_task() \
* dequeue_task() + RaceTime
* util_est_dequeue() /
* ---------------------------------- B
*
* The additional check on "current == p" it's required to
* properly fix the execl regression and it helps in further
* reducing the chances for the above race.
*/
if (unlikely(task_on_rq_queued(p) || current == p))
lsub_positive(&estimated, _task_util_est(p));
util = max(util, estimated);
}
#endif
/*
* Utilization (estimated) can exceed the CPU capacity, thus let's
* clamp to the maximum CPU capacity to ensure consistency with
* the cpu_util call.
*/
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
static unsigned long group_max_util(struct energy_env *eenv, int cpu_idx)
{
unsigned long max_util = 0;
unsigned long util;
int cpu;
for_each_cpu(cpu, sched_group_span(eenv->sg_cap)) {
util = cpu_util_without(cpu, eenv->p);
/*
* If we are looking at the target CPU specified by the eenv,
* then we should add the (estimated) utilization of the task
* assuming we will wake it up on that CPU.
*/
if (unlikely(cpu == eenv->cpu[cpu_idx].cpu_id))
util += eenv->util_delta_boosted;
max_util = max(max_util, util);
}
return max_util;
}
/*
 * group_norm_util() returns the approximated group util relative to its
* current capacity (busy ratio) in the range [0..SCHED_CAPACITY_SCALE] for use
* in energy calculations. Since task executions may or may not overlap in time
* in the group the true normalized util is between max(cpu_norm_util(i)) and
* sum(cpu_norm_util(i)) when iterating over all cpus in the group, i. The
* latter is used as the estimate as it leads to a more pessimistic energy
* estimate (more busy).
*/
static unsigned long
group_norm_util(struct energy_env *eenv, int cpu_idx)
{
unsigned long capacity = eenv->cpu[cpu_idx].cap;
unsigned long util, util_sum = 0;
int cpu;
for_each_cpu(cpu, sched_group_span(eenv->sg)) {
util = cpu_util_without(cpu, eenv->p);
/*
* If we are looking at the target CPU specified by the eenv,
* then we should add the (estimated) utilization of the task
* assuming we will wake it up on that CPU.
*/
if (unlikely(cpu == eenv->cpu[cpu_idx].cpu_id))
util += eenv->util_delta;
util_sum += __cpu_norm_util(util, capacity);
}
if (util_sum > SCHED_CAPACITY_SCALE)
return SCHED_CAPACITY_SCALE;
return util_sum;
}
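/*
 * Pick the lowest-index capacity state whose capacity covers the group's
 * maximum expected utilization. Illustrative: with cap_states of 256, 512
 * and 1024 and a group_max_util() of 400, cap_idx 1 (512) is selected; if
 * no state is large enough, the highest one is used.
 */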
static int find_new_capacity(struct energy_env *eenv, int cpu_idx)
{
const struct sched_group_energy *sge = eenv->sg_cap->sge;
unsigned long util = group_max_util(eenv, cpu_idx);
int idx, cap_idx;
cap_idx = sge->nr_cap_states - 1;
for (idx = 0; idx < sge->nr_cap_states; idx++) {
if (sge->cap_states[idx].cap >= util) {
cap_idx = idx;
break;
}
}
/* Keep track of SG's capacity */
eenv->cpu[cpu_idx].cap = sge->cap_states[cap_idx].cap;
eenv->cpu[cpu_idx].cap_idx = cap_idx;
return cap_idx;
}
static int group_idle_state(struct energy_env *eenv, int cpu_idx)
{
struct sched_group *sg = eenv->sg;
int src_in_grp, dst_in_grp;
int i, state = INT_MAX;
int max_idle_state_idx;
long grp_util = 0;
int new_state;
/* Find the shallowest idle state in the sched group. */
for_each_cpu(i, sched_group_span(sg))
state = min(state, idle_get_state_idx(cpu_rq(i)));
if (unlikely(state == INT_MAX))
return -EINVAL;
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
/*
* Try to estimate if a deeper idle state is
* achievable when we move the task.
*/
for_each_cpu(i, sched_group_span(sg))
grp_util += cpu_util(i);
src_in_grp = cpumask_test_cpu(eenv->cpu[EAS_CPU_PRV].cpu_id,
sched_group_span(sg));
dst_in_grp = cpumask_test_cpu(eenv->cpu[cpu_idx].cpu_id,
sched_group_span(sg));
if (src_in_grp == dst_in_grp) {
/*
* both CPUs under consideration are in the same group or not in
* either group, migration should leave idle state the same.
*/
return state;
}
/*
* add or remove util as appropriate to indicate what group util
* will be (worst case - no concurrent execution) after moving the task
*/
grp_util += src_in_grp ? -eenv->util_delta : eenv->util_delta;
if (grp_util >
((long)sg->sgc->max_capacity * (int)sg->group_weight)) {
/*
* After moving, the group will be fully occupied
* so assume it will not be idle at all.
*/
return 0;
}
/*
* after moving, this group is at most partly
* occupied, so it should have some idle time.
*/
max_idle_state_idx = sg->sge->nr_idle_states - 2;
new_state = grp_util * max_idle_state_idx;
if (grp_util <= 0) {
/* group will have no util, use lowest state */
new_state = max_idle_state_idx + 1;
} else {
/*
* for partially idle, linearly map util to idle
* states, excluding the lowest one. This does not
* correspond to the state we expect to enter in
* reality, but an indication of what might happen.
*/
new_state = min_t(int, max_idle_state_idx,
new_state / sg->sgc->max_capacity);
new_state = max_idle_state_idx - new_state;
}
return new_state;
}
#ifdef DEBUG_EENV_DECISIONS
static struct _eenv_debug *eenv_debug_entry_ptr(struct _eenv_debug *base, int idx);
static void store_energy_calc_debug_info(struct energy_env *eenv, int cpu_idx, int cap_idx, int idle_idx)
{
int debug_idx = eenv->cpu[cpu_idx].debug_idx;
unsigned long sg_util, busy_energy, idle_energy;
const struct sched_group_energy *sge;
struct _eenv_debug *dbg;
int cpu;
if (debug_idx < EAS_EENV_DEBUG_LEVELS) {
sge = eenv->sg->sge;
sg_util = group_norm_util(eenv, cpu_idx);
busy_energy = sge->cap_states[cap_idx].power;
busy_energy *= sg_util;
idle_energy = SCHED_CAPACITY_SCALE - sg_util;
idle_energy *= sge->idle_states[idle_idx].power;
/* should we use sg_cap or sg? */
dbg = eenv_debug_entry_ptr(eenv->cpu[cpu_idx].debug, debug_idx);
dbg->cap = sge->cap_states[cap_idx].cap;
dbg->norm_util = sg_util;
dbg->cap_energy = sge->cap_states[cap_idx].power;
dbg->idle_energy = sge->idle_states[idle_idx].power;
dbg->this_energy = busy_energy + idle_energy;
dbg->this_busy_energy = busy_energy;
dbg->this_idle_energy = idle_energy;
cpumask_copy(&dbg->group_cpumask,
sched_group_span(eenv->sg));
for_each_cpu(cpu, &dbg->group_cpumask)
dbg->cpu_util[cpu] = cpu_util(cpu);
eenv->cpu[cpu_idx].debug_idx = debug_idx+1;
}
}
#else
#define store_energy_calc_debug_info(a,b,c,d) {}
#endif /* DEBUG_EENV_DECISIONS */
/*
* calc_sg_energy: compute energy for the eenv's SG (i.e. eenv->sg).
*
* This works in iterations to compute the SG's energy for each CPU
* candidate defined by the energy_env's cpu array.
*/
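/*
 * Energy model used below, with illustrative numbers: for sg_util = 512
 * (a 50% normalized busy ratio), busy_power = 400 and idle_power = 20,
 * busy_energy = 512 * 400 and idle_energy = (1024 - 512) * 20, so this
 * group contributes 204800 + 10240 units (still scaled by
 * SCHED_CAPACITY_SCALE) to the candidate's running total.
 */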
static int calc_sg_energy(struct energy_env *eenv)
{
struct sched_group *sg = eenv->sg;
unsigned long busy_energy, idle_energy;
unsigned int busy_power, idle_power;
unsigned long total_energy = 0;
unsigned long sg_util;
int cap_idx, idle_idx;
int cpu_idx;
for (cpu_idx = EAS_CPU_PRV; cpu_idx < eenv->max_cpu_count; ++cpu_idx) {
if (eenv->cpu[cpu_idx].cpu_id == -1)
continue;
/* Compute ACTIVE energy */
cap_idx = find_new_capacity(eenv, cpu_idx);
busy_power = sg->sge->cap_states[cap_idx].power;
sg_util = group_norm_util(eenv, cpu_idx);
busy_energy = sg_util * busy_power;
/* Compute IDLE energy */
idle_idx = group_idle_state(eenv, cpu_idx);
if (unlikely(idle_idx < 0))
return idle_idx;
if (idle_idx > sg->sge->nr_idle_states - 1)
idle_idx = sg->sge->nr_idle_states - 1;
idle_power = sg->sge->idle_states[idle_idx].power;
idle_energy = SCHED_CAPACITY_SCALE - sg_util;
idle_energy *= idle_power;
total_energy = busy_energy + idle_energy;
eenv->cpu[cpu_idx].energy += total_energy;
store_energy_calc_debug_info(eenv, cpu_idx, cap_idx, idle_idx);
}
return 0;
}
/*
* compute_energy() computes the absolute variation in energy consumption by
* moving eenv.util_delta from EAS_CPU_PRV to EAS_CPU_NXT.
*
* NOTE: compute_energy() may fail when racing with sched_domain updates, in
* which case we abort by returning -EINVAL.
*/
static int compute_energy(struct energy_env *eenv)
{
struct sched_domain *sd;
int cpu;
struct cpumask visit_cpus;
struct sched_group *sg;
int cpu_count;
WARN_ON(!eenv->sg_top->sge);
cpumask_copy(&visit_cpus, sched_group_span(eenv->sg_top));
/* If a cpu is hotplugged in while we are in this function, it does
* not appear in the existing visit_cpus mask which came from the
* sched_group pointer of the sched_domain pointed at by sd_ea for
* either the prev or next cpu and was dereferenced in
* select_energy_cpu_idx.
* Since we will dereference sd_scs later as we iterate through the
* CPUs we expect to visit, new CPUs can be present which are not in
* the visit_cpus mask. Guard this with cpu_count.
*/
cpu_count = cpumask_weight(&visit_cpus);
while (!cpumask_empty(&visit_cpus)) {
struct sched_group *sg_shared_cap = NULL;
cpu = cpumask_first(&visit_cpus);
/*
* Is the group utilization affected by cpus outside this
* sched_group?
* This sd may have groups with cpus which were not present
* when we took visit_cpus.
*/
sd = rcu_dereference(per_cpu(sd_scs, cpu));
if (sd) {
if (sd->parent)
sg_shared_cap = sd->parent->groups;
else /* single cluster system */
sg_shared_cap = sd->groups;
}
for_each_domain(cpu, sd) {
sg = sd->groups;
/* Has this sched_domain already been visited? */
if (sd->child && group_first_cpu(sg) != cpu)
break;
do {
eenv->sg_cap = sg;
if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
eenv->sg_cap = sg_shared_cap;
/*
* Compute the energy for all the candidate
* CPUs in the current visited SG.
*/
eenv->sg = sg;
if (calc_sg_energy(eenv))
return -EINVAL;
/* remove CPUs we have just visited */
if (!sd->child) {
/*
* cpu_count here is the number of
* cpus we expect to visit in this
* calculation. If we race against
* hotplug, we can have extra cpus
* added to the groups we are
* iterating which do not appear in
* the visit_cpus mask. In that case
* we are not able to calculate energy
* without restarting so we will bail
* out and use prev_cpu this time.
*/
if (!cpu_count)
return -EINVAL;
cpumask_xor(&visit_cpus, &visit_cpus, sched_group_span(sg));
cpu_count--;
}
if (cpumask_equal(sched_group_span(sg), sched_group_span(eenv->sg_top)) &&
sd->child)
goto next_cpu;
} while (sg = sg->next, sg != sd->groups);
}
next_cpu:
cpumask_clear_cpu(cpu, &visit_cpus);
continue;
}
return 0;
}
static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
{
return cpu != -1 && cpumask_test_cpu(cpu, sched_group_span(sg));
}
#ifdef DEBUG_EENV_DECISIONS
static void dump_eenv_debug(struct energy_env *eenv)
{
int cpu_idx, grp_idx;
char cpu_utils[(NR_CPUS*12)+10]="cpu_util: ";
char cpulist[64];
trace_printk("eenv scenario: task=%p %s task_util=%lu prev_cpu=%d",
eenv->p, eenv->p->comm, eenv->util_delta, eenv->cpu[EAS_CPU_PRV].cpu_id);
for (cpu_idx=EAS_CPU_PRV; cpu_idx < eenv->max_cpu_count; cpu_idx++) {
if (eenv->cpu[cpu_idx].cpu_id == -1)
continue;
trace_printk("---Scenario %d: Place task on cpu %d energy=%lu (%d debug logs at %p)",
cpu_idx+1, eenv->cpu[cpu_idx].cpu_id,
eenv->cpu[cpu_idx].energy >> SCHED_CAPACITY_SHIFT,
eenv->cpu[cpu_idx].debug_idx,
eenv->cpu[cpu_idx].debug);
for (grp_idx = 0; grp_idx < eenv->cpu[cpu_idx].debug_idx; grp_idx++) {
struct _eenv_debug *debug;
int cpu, written=0;
debug = eenv_debug_entry_ptr(eenv->cpu[cpu_idx].debug, grp_idx);
cpu = scnprintf(cpulist, sizeof(cpulist), "%*pbl", cpumask_pr_args(&debug->group_cpumask));
cpu_utils[0] = 0;
/* print out the relevant cpu_util */
for_each_cpu(cpu, &(debug->group_cpumask)) {
char tmp[64];
if (written > sizeof(cpu_utils)-10) {
cpu_utils[written]=0;
break;
}
written += snprintf(tmp, sizeof(tmp), "cpu%d(%lu) ", cpu, debug->cpu_util[cpu]);
strcat(cpu_utils, tmp);
}
/* trace the data */
trace_printk(" | %s : cap=%lu nutil=%lu, cap_nrg=%lu, idle_nrg=%lu energy=%lu busy_energy=%lu idle_energy=%lu %s",
cpulist, debug->cap, debug->norm_util,
debug->cap_energy, debug->idle_energy,
debug->this_energy >> SCHED_CAPACITY_SHIFT,
debug->this_busy_energy >> SCHED_CAPACITY_SHIFT,
debug->this_idle_energy >> SCHED_CAPACITY_SHIFT,
cpu_utils);
}
trace_printk("---");
}
trace_printk("----- done");
return;
}
#else
#define dump_eenv_debug(a) {}
#endif /* DEBUG_EENV_DECISIONS */
/*
* select_energy_cpu_idx(): estimate the energy impact of changing the
* utilization distribution.
*
* The eenv parameter specifies the changes: utilization amount and a
* collection of possible CPU candidates. The number of candidates
* depends upon the selection algorithm used.
*
* If find_best_target was used to select candidate CPUs, there will
* be at most 3 including prev_cpu. If not, we used a brute force
* selection which will provide the union of:
* * CPUs belonging to the highest sd which is not overutilized
* * CPUs the task is allowed to run on
* * online CPUs
*
* This function returns the index of a CPU candidate specified by the
* energy_env which corresponds to the most energy efficient CPU.
* Thus, 0 (EAS_CPU_PRV) means that none of the CPU candidates is more energy
* efficient than running on prev_cpu. This is also the value returned in case
* of abort due to error conditions during the computations. The only
* exception to this is if we fail to access the energy model via sd_ea, where
* we return -1 with the intent of asking the system to use a different
* wakeup placement algorithm.
*
* A value greater than zero means that the most energy efficient CPU is the
* one represented by eenv->cpu[eenv->next_idx].cpu_id.
*/
static inline int select_energy_cpu_idx(struct energy_env *eenv)
{
int last_cpu_idx = eenv->max_cpu_count - 1;
struct sched_domain *sd;
struct sched_group *sg;
int sd_cpu = -1;
int cpu_idx;
int margin;
sd_cpu = eenv->cpu[EAS_CPU_PRV].cpu_id;
sd = rcu_dereference(per_cpu(sd_ea, sd_cpu));
if (!sd)
return -1;
cpumask_clear(&eenv->cpus_mask);
for (cpu_idx = EAS_CPU_PRV; cpu_idx < eenv->max_cpu_count; ++cpu_idx) {
int cpu = eenv->cpu[cpu_idx].cpu_id;
if (cpu < 0)
continue;
cpumask_set_cpu(cpu, &eenv->cpus_mask);
}
sg = sd->groups;
do {
/* Skip SGs which do not contain a candidate CPU */
if (!cpumask_intersects(&eenv->cpus_mask, sched_group_span(sg)))
continue;
eenv->sg_top = sg;
if (compute_energy(eenv) == -EINVAL)
return EAS_CPU_PRV;
} while (sg = sg->next, sg != sd->groups);
/* remember - eenv energy values are unscaled */
/*
* Compute the dead-zone margin used to prevent too many task
* migrations with negligible energy savings.
* An energy saving is considered meaningful if it reduces the energy
* consumption of EAS_CPU_PRV CPU candidate by at least ~1.56%
*/
margin = eenv->cpu[EAS_CPU_PRV].energy >> 6;
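/*
* e.g. a prev-CPU energy of 6400 yields a margin of 100, so another
* candidate must save at least 100 (unscaled) energy units to count.
*/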
/*
* By default the EAS_CPU_PRV CPU is considered the most energy
* efficient, with a 0 energy variation.
*/
eenv->next_idx = EAS_CPU_PRV;
eenv->cpu[EAS_CPU_PRV].nrg_delta = 0;
dump_eenv_debug(eenv);
trace_sched_energy_diff(eenv->p, eenv->cpu[EAS_CPU_PRV].cpu_id,
eenv->cpu[EAS_CPU_PRV].energy,
eenv->cpu[EAS_CPU_NXT].cpu_id,
eenv->cpu[EAS_CPU_NXT].energy,
eenv->cpu[EAS_CPU_BKP].cpu_id,
eenv->cpu[EAS_CPU_BKP].energy);
/*
* Compare the other CPU candidates to find a CPU which can be
* more energy efficient than EAS_CPU_PRV
*/
if (sched_feat(FBT_STRICT_ORDER))
last_cpu_idx = EAS_CPU_BKP;
for (cpu_idx = EAS_CPU_NXT; cpu_idx <= last_cpu_idx; cpu_idx++) {
if (eenv->cpu[cpu_idx].cpu_id < 0)
continue;
eenv->cpu[cpu_idx].nrg_delta =
eenv->cpu[cpu_idx].energy -
eenv->cpu[EAS_CPU_PRV].energy;
/* filter energy variations within the dead-zone margin */
if (abs(eenv->cpu[cpu_idx].nrg_delta) < margin)
eenv->cpu[cpu_idx].nrg_delta = 0;
/* update the schedule candidate with min(nrg_delta) */
if (eenv->cpu[cpu_idx].nrg_delta <
eenv->cpu[eenv->next_idx].nrg_delta) {
eenv->next_idx = cpu_idx;
/* break out if we want to stop on first saving candidate */
if (sched_feat(FBT_STRICT_ORDER))
break;
}
}
return eenv->next_idx;
}
/*
* Detect M:N waker/wakee relationships via a switching-frequency heuristic.
*
* A waker of many should wake a different task than the one last awakened
* at a frequency roughly N times higher than one of its wakees.
*
* In order to determine whether we should let the load spread vs consolidating
* to shared cache, we look for a minimum 'flip' frequency of llc_size in one
* partner, and a factor of llc_size higher frequency in the other.
*
* With both conditions met, we can be relatively sure that the relationship is
* non-monogamous, with partner count exceeding socket size.
*
* Waker/wakee being client/server, worker/dispatcher, interrupt source or
* whatever is irrelevant; the spread criterion is simply that the apparent
* partner count exceeds the socket size.
*/
__maybe_unused static int wake_wide(struct task_struct *p)
{
unsigned int master = current->wakee_flips;
unsigned int slave = p->wakee_flips;
int factor = this_cpu_read(sd_llc_size);
if (master < slave)
swap(master, slave);
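/*
* 'master' is now the higher flip frequency of the pair: only spread
* when the lower one reaches llc_size and the higher one exceeds it by
* a further factor of llc_size.
*/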
if (slave < factor || master < slave * factor)
return 0;
return 1;
}
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
* soonest. For the purpose of speed we only consider the waking and previous
* CPU.
*
* wake_affine_idle() - only considers 'now', it checks whether the waking CPU is
* cache-affine and is (or will be) idle.
*
* wake_affine_weight() - considers the weight to reflect the average
* scheduling latency of the CPUs. This seems to work
* for the overloaded case.
*/
static int
wake_affine_idle(int this_cpu, int prev_cpu, int sync)
{
/*
* If this_cpu is idle, it implies the wakeup is from interrupt
* context. Only allow the move if cache is shared. Otherwise an
* interrupt intensive workload could force all tasks onto one
* node depending on the IO topology or IRQ affinity settings.
*
* If the prev_cpu is idle and cache affine then avoid a migration.
* There is no guarantee that the cache hot data from an interrupt
* is more important than cache hot data on the prev_cpu and from
* a cpufreq perspective, it's better to have higher utilisation
* on one CPU.
*/
if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
if (sync && cpu_rq(this_cpu)->nr_running == 1)
return this_cpu;
return nr_cpumask_bits;
}
static int
wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
if (sync) {
unsigned long current_load = task_h_load(current);
if (current_load > this_eff_load)
return this_cpu;
this_eff_load -= current_load;
}
task_load = task_h_load(p);
this_eff_load += task_load;
if (sched_feat(WA_BIAS))
this_eff_load *= 100;
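/*
* Cross-multiply each load by the other CPU's capacity so the final
* comparison is effectively this_load/capacity(this_cpu) vs
* prev_load/capacity(prev_cpu) without doing a division.
*/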
this_eff_load *= capacity_of(prev_cpu);
prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
prev_eff_load *= capacity_of(this_cpu);
/*
* If sync, adjust the weight of prev_eff_load such that if
* prev_eff == this_eff that select_idle_sibling() will consider
* stacking the wakee on top of the waker if no other CPU is
* idle.
*/
if (sync)
prev_eff_load += 1;
return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
}
static int wake_affine(struct sched_domain *sd, struct task_struct *p,
int prev_cpu, int sync)
{
int this_cpu = smp_processor_id();
int target = nr_cpumask_bits;
if (sched_feat(WA_IDLE))
target = wake_affine_idle(this_cpu, prev_cpu, sync);
if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
if (target == nr_cpumask_bits)
return prev_cpu;
schedstat_inc(sd->ttwu_move_affine);
schedstat_inc(p->se.statistics.nr_wakeups_affine);
return target;
}
#ifdef CONFIG_SCHED_TUNE
struct reciprocal_value schedtune_spc_rdiv;
static long
schedtune_margin(unsigned long signal, long boost, long capacity)
{
long long margin = 0;
/*
* Signal proportional compensation (SPC)
*
* The Boost (B) value is used to compute a Margin (M) which is
* proportional to the complement of the original Signal (S):
* M = B * (capacity - S)
* The obtained M could be used by the caller to "boost" S.
*/
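/*
* Worked example (assuming schedtune_spc_rdiv encodes the usual
* divide-by-100 of the boost percentage): boost = 10, capacity = 1024,
* signal = 512 gives M = 10 * (1024 - 512) / 100 = ~51.
*/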
if (boost >= 0) {
if (capacity > signal) {
margin = capacity - signal;
margin *= boost;
}
} else
margin = -signal * boost;
margin = reciprocal_divide(margin, schedtune_spc_rdiv);
if (boost < 0)
margin *= -1;
return margin;
}
inline long
schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p)
{
int boost = schedtune_cpu_boost_with(cpu, p);
long margin;
if (boost == 0)
margin = 0;
else
margin = schedtune_margin(util, boost, capacity_orig_of(cpu));
trace_sched_boost_cpu(cpu, util, margin);
return margin;
}
long schedtune_task_margin(struct task_struct *task)
{
int boost = schedtune_task_boost(task);
unsigned long util;
long margin;
if (boost == 0)
return 0;
util = task_util_est(task);
margin = schedtune_margin(util, boost, SCHED_CAPACITY_SCALE);
return margin;
}
#else /* CONFIG_SCHED_TUNE */
inline long
schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p)
{
return 0;
}
#endif /* CONFIG_SCHED_TUNE */
static unsigned long cpu_util_without(int cpu, struct task_struct *p);
static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
{
return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
}
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
*
* Assumes p is allowed on at least one CPU in sd.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int sd_flag)
{
struct sched_group *idlest = NULL, *group = sd->groups;
struct sched_group *most_spare_sg = NULL;
unsigned long min_runnable_load = ULONG_MAX;
unsigned long this_runnable_load = ULONG_MAX;
unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
unsigned long most_spare = 0, this_spare = 0;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
(sd->imbalance_pct-100) / 100;
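/*
* 'imbalance' is the absolute runnable-load headroom (a fraction of a
* nice-0 task's weight) a remote group must beat the local group by
* before it is preferred; 'imbalance_scale' plays the same role, in
* percent, for the blocked-load (avg_load) comparisons below.
*/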
do {
unsigned long load, avg_load, runnable_load;
unsigned long spare_cap, max_spare_cap;
int local_group;
int i;
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group), p->cpus_ptr))
continue;
local_group = cpumask_test_cpu(this_cpu,
sched_group_span(group));
/*
* Tally up the load of all CPUs in the group and find
* the group containing the CPU with most spare capacity.
*/
avg_load = 0;
runnable_load = 0;
max_spare_cap = 0;
for_each_cpu(i, sched_group_span(group)) {
load = weighted_cpuload(cpu_rq(i));
runnable_load += load;
avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
spare_cap = capacity_spare_without(i, p);
if (spare_cap > max_spare_cap)
max_spare_cap = spare_cap;
}
/* Adjust by relative CPU capacity of the group */
avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
group->sgc->capacity;
runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
group->sgc->capacity;
if (local_group) {
this_runnable_load = runnable_load;
this_avg_load = avg_load;
this_spare = max_spare_cap;
} else {
if (min_runnable_load > (runnable_load + imbalance)) {
/*
* The runnable load is significantly smaller
* so we can pick this new cpu
*/
min_runnable_load = runnable_load;
min_avg_load = avg_load;
idlest = group;
} else if ((runnable_load < (min_runnable_load + imbalance)) &&
(100*min_avg_load > imbalance_scale*avg_load)) {
/*
* The runnable loads are close so take the
* blocked load into account through avg_load.
*/
min_avg_load = avg_load;
idlest = group;
}
if (most_spare < max_spare_cap) {
most_spare = max_spare_cap;
most_spare_sg = group;
}
}
} while (group = group->next, group != sd->groups);
/*
* The cross-over point between using spare capacity or least load
* is too conservative for high utilization tasks on partially
* utilized systems if we require spare_capacity > task_util(p),
* so we allow for some task stuffing by using
* spare_capacity > task_util(p)/2.
*
* Spare capacity can't be used for fork because the utilization has
* not been set yet, we must first select a rq to compute the initial
* utilization.
*/
if (sd_flag & SD_BALANCE_FORK)
goto skip_spare;
if (this_spare > task_util(p) / 2 &&
imbalance_scale*this_spare > 100*most_spare)
return NULL;
if (most_spare > task_util(p) / 2)
return most_spare_sg;
skip_spare:
if (!idlest)
return NULL;
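/*
* Keep the local group (return NULL) when the remote candidate is not
* meaningfully less loaded: either its runnable load exceeds the local
* one by 'imbalance', or the runnable loads are close and the local
* group wins the scaled avg_load comparison.
*/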
if (min_runnable_load > (this_runnable_load + imbalance))
return NULL;
if ((this_runnable_load < (min_runnable_load + imbalance)) &&
(100*this_avg_load < imbalance_scale*min_avg_load))
return NULL;
return idlest;
}
/*
* find_idlest_group_cpu - find the idlest cpu among the cpus in group.
*/
static int
find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
unsigned int min_exit_latency = UINT_MAX;
u64 latest_idle_timestamp = 0;
int least_loaded_cpu = this_cpu;
int shallowest_idle_cpu = -1, si_cpu = -1;
int i;
/* Check if we have any choice: */
if (group->group_weight == 1)
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
if (idle && idle->exit_latency < min_exit_latency) {
/*
* We give priority to a CPU whose idle state
* has the smallest exit latency irrespective
* of any idle timestamp.
*/
min_exit_latency = idle->exit_latency;
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
} else if ((!idle || idle->exit_latency == min_exit_latency) &&
rq->idle_stamp > latest_idle_timestamp) {
/*
* If equal or no active idle state, then
* the most recently idled CPU might have
* a warmer cache.
*/
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1 && si_cpu == -1) {
if (sched_idle_cpu(i)) {
si_cpu = i;
continue;
}
load = weighted_cpuload(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
}
}
}
if (shallowest_idle_cpu != -1)
return shallowest_idle_cpu;
if (si_cpu != -1)
return si_cpu;
return least_loaded_cpu;
}
static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
int cpu, int prev_cpu, int sd_flag)
{
int new_cpu = cpu;
if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
return prev_cpu;
/*
* We need task's util for capacity_spare_without(), sync it up to prev_cpu's
* last_update_time.
*/
if (sd && !(sd_flag & SD_BALANCE_FORK))
sync_entity_load_avg(&p->se);
while (sd) {
struct sched_group *group;
struct sched_domain *tmp;
int weight;
if (!(sd->flags & sd_flag)) {
sd = sd->child;
continue;
}
group = find_idlest_group(sd, p, cpu, sd_flag);
if (!group) {
sd = sd->child;
continue;
}
new_cpu = find_idlest_group_cpu(group, p, cpu);
if (new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
continue;
}
/* Now try balancing at a lower domain level of new_cpu */
cpu = new_cpu;
weight = sd->span_weight;
sd = NULL;
for_each_domain(cpu, tmp) {
if (weight <= tmp->span_weight)
break;
if (tmp->flags & sd_flag)
sd = tmp;
}
/* while loop will break here if sd == NULL */
}
return new_cpu;
}
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
EXPORT_SYMBOL_GPL(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
struct sched_domain_shared *sds;
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds)
WRITE_ONCE(sds->has_idle_cores, val);
}
static inline bool test_idle_cores(int cpu, bool def)
{
struct sched_domain_shared *sds;
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds)
return READ_ONCE(sds->has_idle_cores);
return def;
}
/*
* Scans the local SMT mask to see if the entire core is idle, and records this
* information in sd_llc_shared->has_idle_cores.
*
* Since SMT siblings share all cache levels, inspecting this limited remote
* state should be fairly cheap.
*/
void __update_idle_core(struct rq *rq)
{
int core = cpu_of(rq);
int cpu;
rcu_read_lock();
if (test_idle_cores(core, true))
goto unlock;
for_each_cpu(cpu, cpu_smt_mask(core)) {
if (cpu == core)
continue;
if (!available_idle_cpu(cpu))
goto unlock;
}
set_idle_cores(core, 1);
unlock:
rcu_read_unlock();
}
/*
* Scan the entire LLC domain for idle cores; this dynamically switches off if
* there are no idle cores left in the system; tracked through
* sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
*/
static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
int core, cpu;
if (!static_branch_likely(&sched_smt_present))
return -1;
if (!test_idle_cores(target, false))
return -1;
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
for_each_cpu(cpu, cpu_smt_mask(core)) {
cpumask_clear_cpu(cpu, cpus);
if (!available_idle_cpu(cpu))
idle = false;
}
if (idle)
return core;
}
/*
* Failed to find an idle core; stop looking for one.
*/
set_idle_cores(target, 0);
return -1;
}
/*
* Scan the local SMT mask for idle CPUs.
*/
static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
int cpu, si_cpu = -1;
if (!static_branch_likely(&sched_smt_present))
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
continue;
if (available_idle_cpu(cpu))
return cpu;
if (si_cpu == -1 && sched_idle_cpu(cpu))
si_cpu = cpu;
}
return si_cpu;
}
#else /* CONFIG_SCHED_SMT */
static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
{
return -1;
}
static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
return -1;
}
#endif /* CONFIG_SCHED_SMT */
/*
* Scan the LLC domain for idle CPUs; this is dynamically regulated by
* comparing the average scan cost (tracked in sd->avg_scan_cost) against the
* average idle time for this rq (as found in rq->avg_idle).
*/
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
struct sched_domain *this_sd;
u64 avg_cost, avg_idle;
u64 time, cost;
s64 delta;
int cpu, nr = INT_MAX, si_cpu = -1;
this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
return -1;
/*
* Due to large variance we need a large fuzz factor; hackbench in
* particular is sensitive here.
*/
avg_idle = this_rq()->avg_idle / 512;
avg_cost = this_sd->avg_scan_cost + 1;
if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
return -1;
if (sched_feat(SIS_PROP)) {
u64 span_avg = sd->span_weight * avg_idle;
if (span_avg > 4*avg_cost)
nr = div_u64(span_avg, avg_cost);
else
nr = 4;
}
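/*
* 'nr' now bounds how many CPUs the loop below may visit: roughly the
* number of scans that fit in this rq's (heavily discounted) average
* idle time, but never fewer than 4.
*/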
time = local_clock();
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
for_each_cpu_wrap(cpu, cpus, target) {
if (!--nr)
return si_cpu;
if (cpu_isolated(cpu))
continue;
if (available_idle_cpu(cpu))
break;
if (si_cpu == -1 && sched_idle_cpu(cpu))
si_cpu = cpu;
}
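/* fold this scan's cost into avg_scan_cost as a 1/8-weight EWMA */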
time = local_clock() - time;
cost = this_sd->avg_scan_cost;
delta = (s64)(time - cost) / 8;
this_sd->avg_scan_cost += delta;
return cpu;
}
/*
* Try and locate an idle core/thread in the LLC cache domain.
*/
static inline int __select_idle_sibling(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
int i, recent_used_cpu;
if ((available_idle_cpu(target) && !cpu_isolated(target)) || sched_idle_cpu(target))
return target;
/*
* If the previous cpu is cache affine and idle, don't be stupid.
*/
if ((prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev) && !cpu_isolated(prev)) || sched_idle_cpu(prev))
return prev;
/* Check a recently used CPU as a potential idle candidate */
recent_used_cpu = p->recent_used_cpu;
if (recent_used_cpu != prev &&
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake.
*/
p->recent_used_cpu = prev;
return recent_used_cpu;
}
sd = rcu_dereference(per_cpu(sd_llc, target));
if (!sd)
return target;
i = select_idle_core(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
i = select_idle_cpu(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
i = select_idle_smt(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
return target;
}
static inline int select_idle_sibling_cstate_aware(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
struct sched_group *sg;
int best_idle_cpu = -1;
int best_idle_cstate = -1;
int best_idle_capacity = INT_MAX;
int i;
/*
* Iterate the domains and find an eligible idle CPU.
*/
sd = rcu_dereference(per_cpu(sd_llc, target));
for_each_lower_domain(sd) {
sg = sd->groups;
do {
if (!cpumask_intersects(
sched_group_span(sg), p->cpus_ptr))
goto next;
for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
int idle_idx;
unsigned long new_usage;
unsigned long capacity_orig;
if (!idle_cpu(i))
goto next;
if (cpu_isolated(i))
continue;
/* figure out if the task can fit here at all */
new_usage = uclamp_task(p);
capacity_orig = capacity_orig_of(i);
if (new_usage > capacity_orig)
goto next;
/* if the task fits without changing OPP and we
* intended to use this CPU, just proceed
*/
if (i == target && new_usage <= capacity_curr_of(target)) {
return target;
}
/* otherwise select CPU with shallowest idle state
* to reduce wakeup latency.
*/
idle_idx = idle_get_state_idx(cpu_rq(i));
if (idle_idx < best_idle_cstate &&
capacity_orig <= best_idle_capacity) {
best_idle_cpu = i;
best_idle_cstate = idle_idx;
best_idle_capacity = capacity_orig;
}
}
next:
sg = sg->next;
} while (sg != sd->groups);
}
if (best_idle_cpu >= 0)
target = best_idle_cpu;
return target;
}
static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
if (!sysctl_sched_cstate_aware)
return __select_idle_sibling(p, prev, target);
return select_idle_sibling_cstate_aware(p, prev, target);
}
static inline bool task_fits_capacity(struct task_struct *p,
long capacity)
{
return fits_capacity(task_util_est(p), capacity);
}
static inline bool task_fits_max(struct task_struct *p, int cpu)
{
unsigned long capacity = capacity_orig_of(cpu);
unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
if (capacity == max_capacity)
return true;
if ((task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
uclamp_boosted(p) > 0) &&
is_min_capacity_cpu(cpu))
return false;
return task_fits_capacity(p, capacity);
}
struct find_best_target_env {
struct cpumask *rtg_target;
int placement_boost;
bool need_idle;
int fastpath;
};
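/*
* is_packing_eligible() - decide whether @p may be packed onto the
* already-active @target_cpu instead of waking best_idle_cpu. Packing
* is refused for boosted or need-idle placements; with a single active
* candidate it is allowed only while the CPU's cumulative util (plus
* margin) still fits within its current capacity.
*/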
static bool is_packing_eligible(struct task_struct *p, int target_cpu,
struct find_best_target_env *fbt_env,
unsigned int target_cpus_count,
int best_idle_cstate, bool boosted)
{
unsigned long tutil, estimated_capacity;
if (task_placement_boost_enabled(p) || fbt_env->need_idle || boosted)
return false;
if (best_idle_cstate == -1)
return false;
if (target_cpus_count != 1)
return true;
if (task_in_cum_window_demand(cpu_rq(target_cpu), p))
tutil = 0;
else
tutil = task_util(p);
estimated_capacity = cpu_util_cum(target_cpu, tutil);
estimated_capacity = add_capacity_margin(estimated_capacity,
target_cpu);
/*
* If there is only one active CPU and it is already above its current
* capacity, avoid placing an additional task on the CPU.
*/
return (estimated_capacity <= capacity_curr_of(target_cpu));
}
static int start_cpu(struct task_struct *p, bool boosted,
bool sync_boost, struct cpumask *rtg_target)
{
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
int start_cpu = -1;
if (boosted) {
if (rd->mid_cap_orig_cpu != -1 &&
task_fits_max(p, rd->mid_cap_orig_cpu))
return rd->mid_cap_orig_cpu;
return rd->max_cap_orig_cpu;
}
if (sync_boost && rd->mid_cap_orig_cpu != -1)
return rd->mid_cap_orig_cpu;
/* A task always fits on its rtg_target */
if (rtg_target) {
int rtg_target_cpu = cpumask_first_and(rtg_target,
cpu_online_mask);
if (rtg_target_cpu < nr_cpu_ids)
return rtg_target_cpu;
}
/* Where the task should land based on its demand */
if (rd->min_cap_orig_cpu != -1
&& task_fits_max(p, rd->min_cap_orig_cpu))
start_cpu = rd->min_cap_orig_cpu;
else if (rd->mid_cap_orig_cpu != -1
&& task_fits_max(p, rd->mid_cap_orig_cpu))
start_cpu = rd->mid_cap_orig_cpu;
else
start_cpu = rd->max_cap_orig_cpu;
return start_cpu;
}
enum fastpaths {
NONE = 0,
SYNC_WAKEUP,
PREV_CPU_FASTPATH,
};
static inline int find_best_target(struct task_struct *p, int *backup_cpu,
bool boosted, bool sync_boost,
bool prefer_idle,
struct find_best_target_env *fbt_env)
{
unsigned long min_util = uclamp_task(p);
unsigned long target_capacity = ULONG_MAX;
unsigned long min_wake_util = ULONG_MAX;
unsigned long target_max_spare_cap = 0;
unsigned long target_util = ULONG_MAX;
unsigned long best_active_util = ULONG_MAX;
unsigned long best_active_cuml_util = ULONG_MAX;
unsigned long best_idle_cuml_util = ULONG_MAX;
unsigned long best_idle_util = ULONG_MAX;
int best_idle_cstate = INT_MAX;
struct sched_domain *sd;
struct sched_group *sg;
int best_active_cpu = -1;
int best_idle_cpu = -1;
int target_cpu = -1;
int cpu, i;
long spare_wake_cap, most_spare_wake_cap = 0;
int most_spare_cap_cpu = -1;
unsigned int active_cpus_count = 0;
int prev_cpu = task_cpu(p);
bool next_group_higher_cap = false;
int isolated_candidate = -1;
int mid_cap_orig_cpu = cpu_rq(smp_processor_id())->rd->mid_cap_orig_cpu;
struct task_struct *curr_tsk;
*backup_cpu = -1;
/*
* In most cases, target_capacity tracks capacity_orig of the most
* energy efficient CPU candidate, so we want to minimise
* target_capacity. For these cases target_capacity is already
* initialized to ULONG_MAX.
* However, for prefer_idle and boosted tasks we look for a high
* performance CPU, so we want to maximise target_capacity. In this
* case we initialise target_capacity to 0.
*/
if (prefer_idle && boosted)
target_capacity = 0;
/* Find start CPU based on boost value */
cpu = start_cpu(p, boosted, sync_boost, fbt_env->rtg_target);
if (cpu < 0)
return -1;
/* Find SD for the start CPU */
sd = rcu_dereference(per_cpu(sd_ea, cpu));
if (!sd)
return -1;
/* fast path for prev_cpu */
if ((capacity_orig_of(prev_cpu) == capacity_orig_of(cpu)) &&
!cpu_isolated(prev_cpu) && cpu_online(prev_cpu) &&
idle_cpu(prev_cpu)) {
if (idle_get_state_idx(cpu_rq(prev_cpu)) <= 1) {
/*
* Since target_cpu and backup_cpu are both -1s the
* caller will choose prev_cpu and importantly skip
* energy evaluation
*/
target_cpu = -1;
fbt_env->fastpath = PREV_CPU_FASTPATH;
trace_sched_find_best_target(p, prefer_idle, min_util,
cpu, -1, -1, -1, target_cpu, -1);
goto out;
}
}
/* Scan CPUs in all SDs */
sg = sd->groups;
do {
for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
unsigned long capacity_curr = capacity_curr_of(i);
unsigned long capacity_orig = capacity_orig_of(i);
unsigned long wake_util, new_util, new_util_cuml;
long spare_cap;
int idle_idx = INT_MAX;
trace_sched_cpu_util(i);
if (!cpu_online(i) || cpu_isolated(i))
continue;
if (isolated_candidate == -1)
isolated_candidate = i;
/*
* This CPU is the target of an active migration that's
* yet to complete. Avoid placing another task on it.
* See check_for_migration()
*/
if (is_reserved(i))
continue;
if (sched_cpu_high_irqload(i))
continue;
/*
* p's blocked utilization is still accounted for on prev_cpu
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
wake_util = cpu_util_without(i, p);
new_util = wake_util + uclamp_task_util(p);
spare_wake_cap = capacity_orig_of(i) - wake_util;
if (spare_wake_cap > most_spare_wake_cap) {
most_spare_wake_cap = spare_wake_cap;
most_spare_cap_cpu = i;
}
/*
* Cumulative demand may already be accounting for the
* task. If so, add just the boost-utilization to
* the cumulative demand of the cpu.
*/
if (task_in_cum_window_demand(cpu_rq(i), p))
new_util_cuml = cpu_util_cum(i, 0) +
min_util - task_util(p);
else
new_util_cuml = cpu_util_cum(i, 0) + min_util;
/*
* Ensure minimum capacity to grant the required boost.
* The target CPU can be already at a capacity level higher
* than the one required to boost the task.
* However, if the task prefers idle cpu and that
* cpu is idle, skip this check.
*/
new_util = max(min_util, new_util);
if (!(prefer_idle && idle_cpu(i))
&& new_util > capacity_orig)
continue;
/*
* Pre-compute the maximum possible capacity we expect
* to have available on this CPU once the task is
* enqueued here.
*/
spare_cap = capacity_orig - new_util;
if (idle_cpu(i))
idle_idx = idle_get_state_idx(cpu_rq(i));
/*
* Case A) Latency sensitive tasks
*
* Unconditionally favoring tasks that prefer idle CPU to
* improve latency.
*
* Looking for:
* - an idle CPU, whatever its idle_state is, since
* the first CPUs we explore are more likely to be
* reserved for latency sensitive tasks.
* - a non idle CPU where the task fits in its current
* capacity and has the maximum spare capacity.
* - a non idle CPU with lower contention from other
* tasks and running at the lowest possible OPP.
*
* The last two goals try to favor a non-idle CPU
* where the task can run as if it is "almost alone".
* A maximum spare capacity CPU is favoured since
* the task already fits into that CPU's capacity
* without waiting for an OPP chance.
*
* The following code path is the only one in the CPUs
* exploration loop which is always used by
* prefer_idle tasks. It exits the loop with either a
* best_active_cpu or a target_cpu which should
* represent an optimal choice for latency sensitive
* tasks.
*/
if (prefer_idle) {
/*
* Case A.1: IDLE CPU
* Return the best IDLE CPU we find:
* - for boosted tasks: if the task fits in mid
* cluster, prefer the first mid cluster cpu
* due to cpuset design, then other mid cluster
* cpus. Otherwise, choose max cluster cpu.
* - for !boosted tasks: the most energy
* efficient CPU (i.e. smallest capacity_orig)
*/
if (boosted && mid_cap_orig_cpu != -1 &&
best_idle_cpu == mid_cap_orig_cpu)
break;
if (idle_cpu(i)) {
if (boosted &&
capacity_orig < target_capacity)
continue;
if (!boosted &&
capacity_orig > target_capacity)
continue;
if (capacity_orig == target_capacity &&
sysctl_sched_cstate_aware) {
if (best_idle_cstate < idle_idx)
continue;
/*
* If idle state of cpu is the
* same, select least utilized.
*/
else if (best_idle_cstate ==
idle_idx &&
best_idle_util <= new_util)
continue;
}
target_capacity = capacity_orig;
best_idle_cstate = idle_idx;
best_idle_util = new_util;
best_idle_cpu = i;
continue;
}
if (best_idle_cpu != -1)
continue;
/* Skip CPUs which do not fit task requirements */
if (capacity_orig < uclamp_task_util(p))
continue;
/*
* Case A.2: Target ACTIVE CPU
* Favor CPUs with max spare capacity.
*/
if (capacity_curr > new_util &&
spare_cap > target_max_spare_cap) {
target_max_spare_cap = spare_cap;
target_cpu = i;
continue;
}
if (target_cpu != -1)
continue;
/*
* Case A.3: Backup ACTIVE CPU
* Favor CPUs with:
* - lower utilization due to other tasks
* - lower utilization with the task in
*/
if (wake_util > min_wake_util)
continue;
if (new_util > best_active_util)
continue;
/*
* If utilization is the same between CPUs,
* break the ties with cumulative demand,
* also prefer lower order cpu.
*/
if (new_util == best_active_util &&
new_util_cuml >= best_active_cuml_util)
continue;
min_wake_util = wake_util;
best_active_util = new_util;
best_active_cuml_util = new_util_cuml;
best_active_cpu = i;
continue;
}
/*
* Skip processing placement further if we are visiting
* cpus with lower capacity than start cpu
*/
if (capacity_orig < capacity_orig_of(cpu))
continue;
/*
* Case B) Non latency sensitive tasks on IDLE CPUs.
*
* Find an optimal backup IDLE CPU for non latency
* sensitive tasks.
*
* Looking for:
* - minimizing the capacity_orig,
* i.e. preferring LITTLE CPUs
* - favoring shallowest idle states
* i.e. avoid to wakeup deep-idle CPUs
*
* The following code path is used by non latency
* sensitive tasks if IDLE CPUs are available. If at
* least one such CPU is available, it sets
* best_idle_cpu to the most suitable idle CPU to be
* selected.
*
* If idle CPUs are available, favour these CPUs to
* improve performance by spreading tasks.
* Indeed, the energy_diff() computed by the caller
* will take care to minimize energy consumption
* without affecting performance.
*/
if (idle_cpu(i)) {
/*
* Skip CPUs in deeper idle state, but only
* if they are also less energy efficient.
* IOW, prefer a deep IDLE LITTLE CPU vs a
* shallow idle big CPU.
*/
if (capacity_orig >= target_capacity &&
sysctl_sched_cstate_aware &&
best_idle_cstate < idle_idx)
continue;
if (best_idle_cstate == idle_idx &&
(best_idle_cpu == prev_cpu ||
(i != prev_cpu &&
new_util_cuml > best_idle_cuml_util)))
continue;
target_capacity = capacity_orig;
best_idle_cstate = idle_idx;
best_idle_cuml_util = new_util_cuml;
best_idle_cpu = i;
continue;
}
#ifdef CONFIG_SCHED_WALT
/*
* Consider only idle CPUs for active migration.
*/
if (p->state == TASK_RUNNING)
continue;
#endif
/*
* Case C) Non latency sensitive tasks on ACTIVE CPUs.
*
* Pack tasks in the most energy efficient capacities.
*
* This task packing strategy prefers more energy
* efficient CPUs (i.e. pack on smaller maximum
* capacity CPUs) while also trying to spread tasks to
* run them all at the lower OPP.
*
* This assumes for example that it's more energy
* efficient to run two tasks on two CPUs at a lower
* OPP than packing both on a single CPU but running
* that CPU at a higher OPP.
*
* Thus, this case keeps track of the CPU with the
* smallest maximum capacity and highest spare maximum
* capacity.
*/
active_cpus_count++;
/* Favor CPUs with maximum spare capacity */
if (spare_cap < target_max_spare_cap)
continue;
target_max_spare_cap = spare_cap;
target_capacity = capacity_orig;
target_util = new_util;
target_cpu = i;
}
next_group_higher_cap = (capacity_orig_of(group_first_cpu(sg)) <
capacity_orig_of(group_first_cpu(sg->next)));
/*
* If we've found a cpu, but the boost is ON_ALL we continue
* visiting other clusters. If the boost is ON_BIG we visit
* next cluster if they are higher in capacity. If we are
* not in any kind of boost, we break.
*/
if (!prefer_idle && !boosted &&
(target_cpu != -1 || best_idle_cpu != -1) &&
(fbt_env->placement_boost == SCHED_BOOST_NONE ||
sched_boost() != FULL_THROTTLE_BOOST ||
(fbt_env->placement_boost == SCHED_BOOST_ON_BIG &&
!next_group_higher_cap)))
break;
/*
* if we are in prefer_idle and have found an idle cpu,
* break from searching more groups based on the stune.boost and
* group cpu capacity. For !prefer_idle && boosted case, don't
* iterate lower capacity CPUs unless the task can't be
* accommodated in the higher capacity CPUs.
*/
if ((prefer_idle && best_idle_cpu != -1) ||
(boosted && (best_idle_cpu != -1 || target_cpu != -1))) {
if (boosted) {
/*
* For boosted task, stop searching when an idle
* cpu is found in mid cluster.
*/
if ((mid_cap_orig_cpu != -1 &&
best_idle_cpu >= mid_cap_orig_cpu) ||
!next_group_higher_cap)
break;
} else {
if (next_group_higher_cap)
break;
}
}
} while (sg = sg->next, sg != sd->groups);
if (prefer_idle && (best_idle_cpu != -1)) {
trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
best_idle_cpu, best_active_cpu,
-1, best_idle_cpu, -1);
return best_idle_cpu;
}
if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
active_cpus_count, best_idle_cstate,
boosted)) {
target_cpu = best_idle_cpu;
best_idle_cpu = -1;
}
/*
* For non latency sensitive tasks, cases B and C in the previous loop,
* we pick the best IDLE CPU only if we were not able to find a target
* ACTIVE CPU.
*
* Policy priorities:
*
* - prefer_idle tasks:
*
* a) IDLE CPU available: best_idle_cpu
* b) ACTIVE CPU where task fits and has the bigger maximum spare
* capacity (i.e. target_cpu)
* c) ACTIVE CPU with less contention due to other tasks
* (i.e. best_active_cpu)
*
* - NON prefer_idle tasks:
*
* a) ACTIVE CPU: target_cpu
* b) IDLE CPU: best_idle_cpu
*/
if (target_cpu != -1 && !idle_cpu(target_cpu) &&
best_idle_cpu != -1) {
curr_tsk = READ_ONCE(cpu_rq(target_cpu)->curr);
if (curr_tsk && uclamp_boosted(curr_tsk)) {
target_cpu = best_idle_cpu;
}
}
if (target_cpu == -1)
target_cpu = prefer_idle
? best_active_cpu
: best_idle_cpu;
else
*backup_cpu = prefer_idle
? best_active_cpu
: best_idle_cpu;
#ifdef CONFIG_SCHED_WALT
if (target_cpu == -1 && most_spare_cap_cpu != -1 &&
/* ensure we use active cpu for active migration */
!(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
target_cpu = most_spare_cap_cpu;
#endif
trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
best_idle_cpu, best_active_cpu,
most_spare_cap_cpu,
target_cpu,
*backup_cpu);
/* it is possible for target and backup
* to be the same CPU - if so, drop backup
*/
if (*backup_cpu == target_cpu)
*backup_cpu = -1;
/*
* The next step of energy evaluation includes
* prev_cpu. Drop target or backup if it is the
* same as prev_cpu.
*/
if (*backup_cpu == prev_cpu)
*backup_cpu = -1;
if (target_cpu == prev_cpu) {
target_cpu = *backup_cpu;
*backup_cpu = -1;
}
if (target_cpu == -1 && isolated_candidate != -1 &&
cpu_isolated(prev_cpu))
target_cpu = isolated_candidate;
out:
return target_cpu;
}
/*
* Disable WAKE_AFFINE in the case where task @p doesn't fit in the
* capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
*
* In that case WAKE_AFFINE doesn't make sense and we'll let
* BALANCE_WAKE sort things out.
*/
static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
{
long min_cap, max_cap;
if (!static_branch_unlikely(&sched_asym_cpucapacity))
return 0;
min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
max_cap = cpu_rq(cpu)->rd->max_cpu_capacity.val;
/* Minimum capacity is close to max, no need to abort wake_affine */
if (max_cap - min_cap < max_cap >> 3)
return 0;
/* Bring task utilization in sync with prev_cpu */
sync_entity_load_avg(&p->se);
return !task_fits_max(p, cpu);
}
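/*
* A CPU is overutilized when its utilization (optionally bumped by
* @delta), scaled by the per-CPU up-margin (expressed against 1024),
* no longer fits in its original capacity.
*/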
bool __cpu_overutilized(int cpu, int delta)
{
return (capacity_orig_of(cpu) * 1024) <
((cpu_util(cpu) + delta) * sched_capacity_margin_up[cpu]);
}
bool cpu_overutilized(int cpu)
{
return __cpu_overutilized(cpu, 0);
}
DEFINE_PER_CPU(struct energy_env, eenv_cache);
/* kernels often have NR_CPUS defined to be much
* larger than what exists in practice on booted systems.
* Allocate the cpu array for eenv calculations
* at boot time to avoid massive overprovisioning.
*/
#ifdef DEBUG_EENV_DECISIONS
static inline int eenv_debug_size_per_dbg_entry(void)
{
return sizeof(struct _eenv_debug) + (sizeof(unsigned long) * num_possible_cpus());
}
static inline int eenv_debug_size_per_cpu_entry(void)
{
/* each cpu struct has an array of _eenv_debug structs
* which have an array of unsigned longs at the end -
* the allocation should be extended so that there are
* at least 'num_possible_cpus' entries in the array.
*/
return EAS_EENV_DEBUG_LEVELS * eenv_debug_size_per_dbg_entry();
}
/* given a per-_eenv_cpu debug env ptr, get the ptr for a given index */
static inline struct _eenv_debug *eenv_debug_entry_ptr(struct _eenv_debug *base, int idx)
{
char *ptr = (char *)base;
ptr += (idx * eenv_debug_size_per_dbg_entry());
return (struct _eenv_debug *)ptr;
}
/* given a pointer to the per-cpu global copy of _eenv_debug, get
* a pointer to the specified _eenv_cpu debug env.
*/
static inline struct _eenv_debug *eenv_debug_percpu_debug_env_ptr(struct _eenv_debug *base, int cpu_idx)
{
char *ptr = (char *)base;
ptr += (cpu_idx * eenv_debug_size_per_cpu_entry());
return (struct _eenv_debug *)ptr;
}
static inline int eenv_debug_size(void)
{
return num_possible_cpus() * eenv_debug_size_per_cpu_entry();
}
#endif
static inline void alloc_eenv(void)
{
int cpu;
int cpu_count = num_possible_cpus();
for_each_possible_cpu(cpu) {
struct energy_env *eenv = &per_cpu(eenv_cache, cpu);
eenv->cpu = kmalloc_array(cpu_count, sizeof(struct eenv_cpu),
GFP_KERNEL);
eenv->eenv_cpu_count = cpu_count;
#ifdef DEBUG_EENV_DECISIONS
eenv->debug = (struct _eenv_debug *)kmalloc(eenv_debug_size(), GFP_KERNEL);
#endif
}
}
static inline void reset_eenv(struct energy_env *eenv)
{
int cpu_count;
struct eenv_cpu *cpu;
#ifdef DEBUG_EENV_DECISIONS
struct _eenv_debug *debug;
int cpu_idx;
debug = eenv->debug;
#endif
cpu_count = eenv->eenv_cpu_count;
cpu = eenv->cpu;
memset(eenv, 0, sizeof(struct energy_env));
eenv->cpu = cpu;
memset(eenv->cpu, 0, sizeof(struct eenv_cpu)*cpu_count);
eenv->eenv_cpu_count = cpu_count;
#ifdef DEBUG_EENV_DECISIONS
memset(debug, 0, eenv_debug_size());
eenv->debug = debug;
for (cpu_idx = 0; cpu_idx < eenv->eenv_cpu_count; cpu_idx++)
eenv->cpu[cpu_idx].debug = eenv_debug_percpu_debug_env_ptr(debug, cpu_idx);
#endif
}
/*
* get_eenv - reset the eenv struct cached for this CPU
*
* When the eenv is returned, it is configured to do
* energy calculations for the maximum number of CPUs
* the task can be placed on. The prev_cpu entry is
* filled in here. Callers are responsible for adding
* other CPU candidates up to eenv->max_cpu_count.
*/
static inline struct energy_env *get_eenv(struct task_struct *p, int prev_cpu)
{
struct energy_env *eenv;
cpumask_t cpumask_possible_cpus;
int cpu = smp_processor_id();
int i;
eenv = &(per_cpu(eenv_cache, cpu));
reset_eenv(eenv);
/* populate eenv */
eenv->p = p;
/* use boosted task util for capacity selection
* during energy calculation, but unboosted task
* util for group utilization calculations
*/
eenv->util_delta = task_util_est(p);
eenv->util_delta_boosted = uclamp_task(p);
cpumask_and(&cpumask_possible_cpus, p->cpus_ptr, cpu_online_mask);
eenv->max_cpu_count = cpumask_weight(&cpumask_possible_cpus);
for (i=0; i < eenv->max_cpu_count; i++)
eenv->cpu[i].cpu_id = -1;
eenv->cpu[EAS_CPU_PRV].cpu_id = prev_cpu;
eenv->next_idx = EAS_CPU_PRV;
return eenv;
}
static inline int wake_to_idle(struct task_struct *p)
{
return (current->flags & PF_WAKE_UP_IDLE) ||
(p->flags & PF_WAKE_UP_IDLE);
}
#ifdef CONFIG_SCHED_WALT
static inline bool is_task_util_above_min_thresh(struct task_struct *p)
{
unsigned int threshold = (sched_boost() == CONSERVATIVE_BOOST) ?
sysctl_sched_min_task_util_for_boost :
sysctl_sched_min_task_util_for_colocation;
return task_util(p) > threshold;
}
static inline struct cpumask *find_rtg_target(struct task_struct *p)
{
struct related_thread_group *grp;
struct cpumask *rtg_target;
rcu_read_lock();
grp = task_related_thread_group(p);
if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) {
rtg_target = &grp->preferred_cluster->cpus;
if (!task_fits_max(p, cpumask_first(rtg_target)))
rtg_target = NULL;
} else {
rtg_target = NULL;
}
rcu_read_unlock();
return rtg_target;
}
#else
static inline struct cpumask *find_rtg_target(struct task_struct *p)
{
return NULL;
}
#endif
/*
* Needs to be called inside rcu_read_lock critical section.
* sd is a pointer to the sched domain we wish to use for an
* energy-aware placement option.
*/
static int find_energy_efficient_cpu(struct sched_domain *sd,
struct task_struct *p,
int cpu, int prev_cpu,
int sync, bool sync_boost)
{
int use_fbt = sched_feat(FIND_BEST_TARGET);
int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
int delta = 0;
int target_cpu = -1;
struct energy_env *eenv;
struct cpumask *rtg_target = find_rtg_target(p);
struct find_best_target_env fbt_env;
bool need_idle = wake_to_idle(p) || uclamp_latency_sensitive(p);
int placement_boost = task_boost_policy(p);
u64 start_t = 0;
int next_cpu = -1, backup_cpu = -1;
int boosted = (uclamp_boosted(p) > 0);
fbt_env.fastpath = 0;
if (trace_sched_task_util_enabled())
start_t = sched_clock();
if (need_idle)
sync = 0;
if (sysctl_sched_sync_hint_enable && sync &&
bias_to_waker_cpu(p, cpu, rtg_target)) {
target_cpu = cpu;
fbt_env.fastpath = SYNC_WAKEUP;
goto out;
}
/* prepopulate energy diff environment */
eenv = get_eenv(p, prev_cpu);
if (eenv->max_cpu_count < 2)
goto out;
if (!use_fbt) {
/*
* using this function outside wakeup balance will not supply
* an sd ptr. Instead, fetch the highest level with energy data.
*/
if (!sd)
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
for_each_cpu_and(cpu_iter, p->cpus_ptr, sched_domain_span(sd)) {
unsigned long spare;
/* prev_cpu already in list */
if (cpu_iter == prev_cpu)
continue;
/*
* Consider only CPUs where the task is expected to
* fit without making the CPU overutilized.
*/
spare = capacity_spare_without(cpu_iter, p);
if (spare * 1024 < sched_capacity_margin_up[cpu_iter] *
task_util_est(p))
continue;
/* Add CPU candidate */
eenv->cpu[eas_cpu_idx++].cpu_id = cpu_iter;
eenv->max_cpu_count = eas_cpu_idx;
/* stop adding CPUs if we have no space left */
if (eas_cpu_idx >= eenv->eenv_cpu_count)
break;
}
} else {
int prefer_idle;
/*
* give compiler a hint that if sched_features
* cannot be changed, it is safe to optimise out
* all if(prefer_idle) blocks.
*/
prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
(uclamp_latency_sensitive(p) > 0) : 0;
eenv->max_cpu_count = EAS_CPU_BKP + 1;
fbt_env.rtg_target = rtg_target;
fbt_env.placement_boost = placement_boost;
fbt_env.need_idle = need_idle;
/* Find a cpu with sufficient capacity */
target_cpu = find_best_target(p, &eenv->cpu[EAS_CPU_BKP].cpu_id,
boosted, sync_boost, prefer_idle,
&fbt_env);
if (target_cpu < 0)
goto out;
/* Immediately return a found idle CPU for a prefer_idle task */
if (prefer_idle && idle_cpu(target_cpu))
goto out;
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
p->state == TASK_WAKING)
delta = task_util(p);
#endif
if (task_placement_boost_enabled(p) || need_idle || boosted ||
(rtg_target && (!cpumask_test_cpu(prev_cpu, rtg_target) ||
cpumask_test_cpu(target_cpu, rtg_target))) ||
__cpu_overutilized(prev_cpu, delta) ||
!task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu))
goto out;
/* Place target into NEXT slot */
eenv->cpu[EAS_CPU_NXT].cpu_id = target_cpu;
next_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id;
backup_cpu = eenv->cpu[EAS_CPU_BKP].cpu_id;
/* take note if no backup was found */
if (eenv->cpu[EAS_CPU_BKP].cpu_id < 0)
eenv->max_cpu_count = EAS_CPU_BKP;
}
if (eenv->max_cpu_count == EAS_CPU_NXT) {
/*
* we did not find any energy-awareness
* candidates beyond prev_cpu, so we will
* fall-back to the regular slow-path.
*/
goto out;
}
/* find most energy-efficient CPU */
target_cpu = select_energy_cpu_idx(eenv) < 0 ? prev_cpu :
eenv->cpu[eenv->next_idx].cpu_id;
out:
if (target_cpu < 0)
target_cpu = prev_cpu;
trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu, sync,
need_idle, fbt_env.fastpath, placement_boost,
rtg_target ? cpumask_first(rtg_target) : -1, start_t,
boosted);
return target_cpu;
}
static inline bool nohz_kick_needed(struct rq *rq, bool only_update);
static void nohz_balancer_kick(bool only_update);
/*
* wake_energy: Make the decision if we want to use an energy-aware
* wakeup task placement or not. This is limited to situations where
* we cannot use energy-awareness right now.
*
* Returns TRUE if we should attempt energy-aware wakeup, FALSE if not.
*
* Should only be called from select_task_rq_fair inside the RCU
* read-side critical section.
*/
static inline int wake_energy(struct task_struct *p, int prev_cpu,
int sd_flag, int wake_flags)
{
struct sched_domain *sd = NULL;
int sync = wake_flags & WF_SYNC;
sd = rcu_dereference_sched(cpu_rq(prev_cpu)->sd);
/*
* Check all definite no-energy-awareness conditions
*/
if (!sd)
return false;
if (!energy_aware())
return false;
if (sd_overutilized(sd))
return false;
/*
* we cannot do energy-aware wakeup placement sensibly
* for tasks with 0 utilization, so let them be placed
* according to the normal strategy.
* However if fbt is in use we may still benefit from
* the heuristics we use there in selecting candidate
* CPUs.
*/
if (unlikely(!sched_feat(FIND_BEST_TARGET) && !task_util_est(p)))
return false;
if (!sched_feat(EAS_PREFER_IDLE)) {
/*
* Force prefer-idle tasks into the slow path; this may not happen
* if none of the sd flags matched.
*/
if (uclamp_latency_sensitive(p) > 0 && !sync)
return false;
}
return true;
}
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
*
* Balances load by selecting the idlest cpu in the idlest group, or under
* certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
*
* Returns the target cpu number.
*
* preempt must be disabled.
*/
static int
select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
{
struct sched_domain *tmp, *affine_sd = NULL;
struct sched_domain *sd = NULL, *energy_sd = NULL;
int cpu = smp_processor_id();
int new_cpu = prev_cpu;
int want_affine = 0;
int want_energy = 0;
int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
rcu_read_lock();
if (sd_flag & SD_BALANCE_WAKE) {
int _wake_cap = wake_cap(p, cpu, prev_cpu);
int _cpus_allowed = cpumask_test_cpu(cpu, p->cpus_ptr);
if (sysctl_sched_sync_hint_enable && sync &&
_cpus_allowed && !_wake_cap &&
wake_affine_idle(cpu, prev_cpu, sync) &&
cpu_is_in_target_set(p, cpu)) {
rcu_read_unlock();
return cpu;
}
record_wakee(p);
want_energy = wake_energy(p, prev_cpu, sd_flag, wake_flags);
want_affine = !want_energy &&
!_wake_cap &&
_cpus_allowed;
}
for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))
continue;
/*
* If both cpu and prev_cpu are part of this domain,
* cpu is a valid SD_WAKE_AFFINE target.
*/
if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
affine_sd = tmp;
break;
}
/*
* If we are able to try an energy-aware wakeup,
* select the highest non-overutilized sched domain
* which includes this cpu and prev_cpu
*
* maybe want to not test prev_cpu and only consider
* the current one?
*/
if (want_energy &&
!sd_overutilized(tmp) &&
cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
energy_sd = tmp;
if (tmp->flags & sd_flag)
sd = tmp;
else if (!(want_affine || want_energy))
break;
}
if (affine_sd) {
sd = NULL; /* Prefer wake_affine over balance flags */
if (cpu == prev_cpu)
goto pick_cpu;
new_cpu = wake_affine(affine_sd, p, prev_cpu, sync);
}
if (!sd) {
pick_cpu:
if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
if (want_affine)
current->recent_used_cpu = cpu;
}
} else {
if (energy_sd) {
/*
* If the sync flag is set but ignored, prefer to
* select cpu in the same or nearest cluster as current.
* So if current is a big or big+ cpu and sync is set,
* indicate that the selection algorithm from mid
* capacity cpu should be used.
*/
bool sync_boost = sync &&
cpu >= cpu_rq(cpu)->rd->mid_cap_orig_cpu;
new_cpu = find_energy_efficient_cpu(energy_sd, p, cpu,
prev_cpu, sync, sync_boost);
}
/* if we did an energy-aware placement and had no choices available
* then fall back to the default find_idlest_cpu choice
*/
if (!energy_sd || new_cpu == -1)
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
}
rcu_read_unlock();
#ifdef CONFIG_NO_HZ_COMMON
if (nohz_kick_needed(cpu_rq(new_cpu), true))
nohz_balancer_kick(true);
#endif
return new_cpu;
}
/*
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
* cfs_rq_of(p) references at time of call are still valid and identify the
* previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
*/
static void migrate_task_rq_fair(struct task_struct *p)
{
struct sched_entity *se = &p->se;
if (!task_on_rq_migrating(p)) {
/*
* We are supposed to update the task to "current" time, then
* it is up to date and ready to go to the new CPU/cfs_rq. But
* we have difficulty in getting what the current time is, so
* simply throw away the out-of-date time. This will result in
* the wakee task being less decayed, but giving the wakee more
* load sounds not bad.
*/
remove_entity_load_avg(se);
}
/* Tell new CPU we are migrated */
p->se.avg.last_update_time = 0;
}
static void task_dead_fair(struct task_struct *p)
{
remove_entity_load_avg(&p->se);
}
#endif /* CONFIG_SMP */
static void set_next_buddy(struct sched_entity *se)
{
if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
return;
for_each_sched_entity(se) {
if (SCHED_WARN_ON(!se->on_rq))
return;
cfs_rq_of(se)->next = se;
}
}
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
if (unlikely(se == pse))
return;
/*
* This is possible from callers such as attach_tasks(), in which we
* unconditionally check_preempt_curr() after an enqueue (which may have
* led to a throttle). This both saves work and prevents false
* next-buddy nomination below.
*/
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
}
/*
* We can come here with TIF_NEED_RESCHED already set from new task
* wake up path.
*
* Note: this also catches the edge-case of curr being in a throttled
* group (e.g. via set_curr_task), since update_curr() (in the
* enqueue of curr) will have resulted in resched being set. This
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
if (test_tsk_need_resched(curr))
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
if (unlikely(task_has_idle_policy(curr)) &&
likely(!task_has_idle_policy(p)))
goto preempt;
/*
* Batch and idle tasks do not preempt non-idle tasks (their preemption
* is driven by the tick):
*/
if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);
BUG_ON(!pse);
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
/*
* XXX pick_eevdf(cfs_rq) != se ?
*/
if (pick_eevdf(cfs_rq) == pse)
goto preempt;
return;
preempt:
resched_curr(rq);
}
static struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
struct task_struct *p;
int new_tasks;
again:
if (!cfs_rq->nr_running)
goto idle;
#ifdef CONFIG_FAIR_GROUP_SCHED
if (prev->sched_class != &fair_sched_class)
goto simple;
/*
* Because of the set_next_buddy() in dequeue_task_fair() it is rather
* likely that a next task is from the same cgroup as the current.
*
* Therefore attempt to avoid putting and setting the entire cgroup
* hierarchy, only change the part that actually changes.
*/
do {
struct sched_entity *curr = cfs_rq->curr;
/*
* Since we got here without doing put_prev_entity() we also
* have to consider cfs_rq->curr. If it is still a runnable
* entity, update_curr() will update its vruntime, otherwise
* forget we've ever seen it.
*/
if (curr) {
if (curr->on_rq)
update_curr(cfs_rq);
else
curr = NULL;
/*
* This call to check_cfs_rq_runtime() will do the
* throttle and dequeue its entity in the parent(s).
* Therefore the nr_running test will indeed
* be correct.
*/
if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
cfs_rq = &rq->cfs;
if (!cfs_rq->nr_running)
goto idle;
goto simple;
}
}
se = pick_next_entity(cfs_rq);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
/*
* Since we haven't yet done put_prev_entity and if the selected task
* is a different task than we started out with, try and touch the
* least amount of cfs_rqs.
*/
if (prev != p) {
struct sched_entity *pse = &prev->se;
while (!(cfs_rq = is_same_group(se, pse))) {
int se_depth = se->depth;
int pse_depth = pse->depth;
if (se_depth <= pse_depth) {
put_prev_entity(cfs_rq_of(pse), pse);
pse = parent_entity(pse);
}
if (se_depth >= pse_depth) {
set_next_entity(cfs_rq_of(se), se);
se = parent_entity(se);
}
}
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
}
goto done;
simple:
#endif
put_prev_task(rq, prev);
do {
se = pick_next_entity(cfs_rq);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
done: __maybe_unused;
#ifdef CONFIG_SMP
/*
* Move the next running task to the front of
* the list, so our cfs_tasks list becomes MRU
* one.
*/
list_move(&p->se.group_node, &rq->cfs_tasks);
#endif
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
update_misfit_status(p, rq);
return p;
idle:
update_misfit_status(NULL, rq);
new_tasks = idle_balance(rq, rf);
/*
* Because idle_balance() releases (and re-acquires) rq->lock, it is
* possible for any higher priority task to appear. In that case we
* must re-start the pick_next_entity() loop.
*/
if (new_tasks < 0)
return RETRY_TASK;
if (new_tasks > 0)
goto again;
/*
* rq is about to be idle, check if we need to update the
* lost_idle_time of clock_pelt
*/
update_idle_rq_clock_pelt(rq);
return NULL;
}
/*
* Account for a descheduled task:
*/
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
put_prev_entity(cfs_rq, se);
}
}
/*
* sched_yield() is very simple
*/
static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se;
/*
* Are we the only task in the tree?
*/
if (unlikely(rq->nr_running == 1))
return;
clear_buddies(cfs_rq, se);
update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq, true);
se->deadline += calc_delta_fair(se->slice, se);
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
{
struct sched_entity *se = &p->se;
/* throttled hierarchies are not runnable */
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
yield_task_fair(rq);
return true;
}
#ifdef CONFIG_SMP
/**************************************************
* Fair scheduling class load-balancing methods.
*
* BASICS
*
* The purpose of load-balancing is to achieve the same basic fairness the
* per-cpu scheduler provides, namely provide a proportional amount of compute
* time to each task. This is expressed in the following equation:
*
* W_i,n/P_i == W_j,n/P_j for all i,j (1)
*
* Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
* W_i,0 is defined as:
*
* W_i,0 = \Sum_j w_i,j (2)
*
* Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
* is derived from the nice value as per sched_prio_to_weight[].
*
* The weight average is an exponential decay average of the instantaneous
* weight:
*
* W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
*
* C_i is the compute capacity of cpu i, typically it is the
* fraction of 'recent' time available for SCHED_OTHER task execution. But it
* can also include other factors [XXX].
*
* To achieve this balance we define a measure of imbalance which follows
* directly from (1):
*
* imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
*
 * We then move tasks around to minimize the imbalance. In the continuous
* function space it is obvious this converges, in the discrete case we get
* a few fun cases generally called infeasible weight scenarios.
*
* [XXX expand on:
* - infeasible weights;
* - local vs global optima in the discrete case. ]
*
*
* SCHED DOMAINS
*
* In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
* for all i,j solution, we create a tree of cpus that follows the hardware
* topology where each level pairs two lower groups (or better). This results
* in O(log n) layers. Furthermore we reduce the number of cpus going up the
* tree to only the first of the previous level and we decrease the frequency
* of load-balance at each level inv. proportional to the number of cpus in
* the groups.
*
* This yields:
*
 *     log_2 n
 * \Sum   { 1/2^i  *  n/2^i  *  2^i }  =  O(n)                        (5)
 * i = 0      |         |       `- size of each group
 *            |         `- number of cpus doing load-balance
 *            `- freq
 *   `- sum over all levels
*
* Coupled with a limit on how many tasks we can migrate every balance pass,
* this makes (5) the runtime complexity of the balancer.
*
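 * As an illustrative check of (5), take n = 8: each per-level term
 * 1/2^i * n/2^i * 2^i reduces to n/2^i, so the sum is 8 + 4 + 2 + 1 = 15,
 * a geometric series bounded by 2n and hence O(n).
 *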
* An important property here is that each CPU is still (indirectly) connected
* to every other cpu in at most O(log n) steps:
*
* The adjacency matrix of the resulting graph is given by:
*
 *           log_2 n
 * A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)    (6)
 *           k = 0
*
* And you'll find that:
*
* A^(log_2 n)_i,j != 0 for all i,j (7)
*
* Showing there's indeed a path between every cpu in at most O(log n) steps.
* The task movement gives a factor of O(m), giving a convergence complexity
* of:
*
* O(nm log n), n := nr_cpus, m := nr_tasks (8)
*
*
* WORK CONSERVING
*
* In order to avoid CPUs going idle while there's still work to do, new idle
* balancing is more aggressive and has the newly idle cpu iterate up the domain
* tree itself instead of relying on other CPUs to bring it work.
*
* This adds some complexity to both (5) and (8) but it reduces the total idle
* time.
*
* [XXX more?]
*
*
* CGROUPS
*
* Cgroups make a horror show out of (2), instead of a simple sum we get:
*
 *                               s_k,i
 * W_i,0 = \Sum_j \Prod_k w_k * -------                               (9)
 *                                S_k
*
* Where
*
* s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
*
* w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
*
 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
* property.
*
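 * As a rough illustration: a cgroup with weight w_k = 1024 and two equally
 * weighted runnable tasks, one on each of two cpus, has s_k,0 = s_k,1 = S_k/2,
 * so it contributes about 512 to each cpu's W_i,0; even this simple split
 * already needs the global sum S_k.
 *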
* [XXX write more on how we solve this.. _after_ merging pjt's patches that
* rewrite all of this once again.]
*/
static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
enum group_type {
group_other = 0,
group_misfit_task,
group_imbalanced,
group_overloaded,
};
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
#define LBF_SOME_PINNED 0x08
#define LBF_ACTIVE_LB 0x40
#define LBF_IGNORE_BIG_TASKS 0x100
#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
struct lb_env {
struct sched_domain *sd;
struct rq *src_rq;
int src_cpu;
int dst_cpu;
struct rq *dst_rq;
struct cpumask *dst_grpmask;
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
unsigned int src_grp_nr_running;
/* The set of CPUs under consideration for load-balancing */
struct cpumask *cpus;
unsigned int flags;
unsigned int loop;
unsigned int loop_break;
unsigned int loop_max;
enum fbq_type fbq_type;
enum group_type src_grp_type;
struct list_head tasks;
};
/*
* Is this task likely cache-hot:
*/
static int task_hot(struct task_struct *p, struct lb_env *env)
{
s64 delta;
lockdep_assert_held(&env->src_rq->lock);
if (p->sched_class != &fair_sched_class)
return 0;
if (unlikely(task_has_idle_policy(p)))
return 0;
/* SMT siblings share cache */
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
/*
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next))
return 1;
if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
return 0;
delta = rq_clock_task(env->src_rq) - p->se.exec_start;
return delta < (s64)sysctl_sched_migration_cost;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns 1, if task migration degrades locality
* Returns 0, if task migration improves locality i.e migration preferred.
* Returns -1, if task migration is not affected by locality.
*/
static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
struct numa_group *numa_group = rcu_dereference(p->numa_group);
unsigned long src_faults, dst_faults;
int src_nid, dst_nid;
if (!static_branch_likely(&sched_numa_balancing))
return -1;
if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
return -1;
src_nid = cpu_to_node(env->src_cpu);
dst_nid = cpu_to_node(env->dst_cpu);
if (src_nid == dst_nid)
return -1;
/* Migrating away from the preferred node is always bad. */
if (src_nid == p->numa_preferred_nid) {
if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
return 1;
else
return -1;
}
/* Encourage migration to the preferred node. */
if (dst_nid == p->numa_preferred_nid)
return 0;
/* Leaving a core idle is often worse than degrading locality. */
if (env->idle != CPU_NOT_IDLE)
return -1;
if (numa_group) {
src_faults = group_faults(p, src_nid);
dst_faults = group_faults(p, dst_nid);
} else {
src_faults = task_faults(p, src_nid);
dst_faults = task_faults(p, dst_nid);
}
return dst_faults < src_faults;
}
#else
static inline int migrate_degrades_locality(struct task_struct *p,
struct lb_env *env)
{
return -1;
}
#endif
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
lockdep_assert_held(&env->src_rq->lock);
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
* 2) cannot be migrated to this CPU due to cpus_allowed, or
* 3) running (obviously), or
* 4) are cache-hot on their current CPU.
*/
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
/* Disregard pcpu kthreads; they are where they need to be. */
if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
return 0;
if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
env->flags |= LBF_SOME_PINNED;
/*
* Remember if this task can be migrated to any other cpu in
* our sched_group. We may want to revisit it if we couldn't
* meet load balance goals by pulling other tasks on src_cpu.
*
* Avoid computing new_dst_cpu
* - for NEWLY_IDLE
* - if we have already computed one in current iteration
* - if it's an active balance
*/
if (env->idle == CPU_NEWLY_IDLE ||
env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
return 0;
/* Prevent to re-select dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
}
}
return 0;
}
/* Record that we found at least one task that could run on dst_cpu */
env->flags &= ~LBF_ALL_PINNED;
if (energy_aware() && !sd_overutilized(env->sd) &&
env->idle == CPU_NEWLY_IDLE &&
!task_in_related_thread_group(p)) {
long util_cum_dst, util_cum_src;
unsigned long demand;
demand = task_util(p);
util_cum_dst = cpu_util_cum(env->dst_cpu, 0) + demand;
util_cum_src = cpu_util_cum(env->src_cpu, 0) - demand;
if (util_cum_dst > util_cum_src)
return 0;
}
#ifdef CONFIG_SCHED_WALT
if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
!preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
return 0;
#endif
/* Don't detach task if it doesn't fit on the destination */
if (env->flags & LBF_IGNORE_BIG_TASKS &&
!task_fits_max(p, env->dst_cpu))
return 0;
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
return 0;
}
/* Don't detach task if it is under active migration */
if (env->src_rq->push_task == p)
return 0;
/*
* Aggressive migration if:
* 1) active balance
* 2) IDLE or NEWLY_IDLE balance.
* 3) destination numa is preferred
* 4) task is cache cold, or
* 5) too many balance attempts have failed.
*/
if (env->flags & LBF_ACTIVE_LB)
return 1;
tsk_cache_hot = migrate_degrades_locality(p, env);
if (tsk_cache_hot == -1)
tsk_cache_hot = task_hot(p, env);
if (env->idle != CPU_NOT_IDLE || tsk_cache_hot <= 0 ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
if (tsk_cache_hot == 1) {
schedstat_inc(env->sd->lb_hot_gained[env->idle]);
schedstat_inc(p->se.statistics.nr_forced_migrations);
}
return 1;
}
schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
return 0;
}
/*
* detach_task() -- detach the task for the migration specified in env
*/
static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
#ifdef CONFIG_SCHED_WALT
double_lock_balance(env->src_rq, env->dst_rq);
if (!(env->src_rq->clock_update_flags & RQCF_UPDATED))
update_rq_clock(env->src_rq);
set_task_cpu(p, env->dst_cpu);
double_unlock_balance(env->src_rq, env->dst_rq);
lockdep_on();
#else
set_task_cpu(p, env->dst_cpu);
#endif
}
/*
* detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
* part of active balancing operations within "domain".
*
* Returns a task if successful and NULL otherwise.
*/
static struct task_struct *detach_one_task(struct lb_env *env)
{
struct task_struct *p;
lockdep_assert_held(&env->src_rq->lock);
list_for_each_entry_reverse(p,
&env->src_rq->cfs_tasks, se.group_node) {
if (!can_migrate_task(p, env))
continue;
detach_task(p, env);
/*
* Right now, this is only the second place where
* lb_gained[env->idle] is updated (other is detach_tasks)
* so we can safely collect stats here rather than
* inside detach_tasks().
*/
schedstat_inc(env->sd->lb_gained[env->idle]);
return p;
}
return NULL;
}
static const unsigned int sched_nr_migrate_break = 32;
/*
* detach_tasks() -- tries to detach up to imbalance weighted load from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
static int detach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
struct task_struct *p;
unsigned long load = 0;
int detached = 0;
int orig_loop = env->loop;
lockdep_assert_held(&env->src_rq->lock);
if (env->imbalance <= 0)
return 0;
if (!same_cluster(env->dst_cpu, env->src_cpu))
env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu))
env->flags |= LBF_IGNORE_BIG_TASKS;
redo:
while (!list_empty(tasks)) {
/*
* We don't want to steal all, otherwise we may be treated likewise,
* which could at worst lead to a livelock crash.
*/
if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
break;
p = list_last_entry(tasks, struct task_struct, se.group_node);
env->loop++;
/*
* We've more or less seen every task there is, call it quits
* unless we haven't found any movable task yet.
*/
if (env->loop > env->loop_max &&
!(env->flags & LBF_ALL_PINNED))
break;
/* take a breather every nr_migrate tasks */
if (env->loop > env->loop_break) {
env->loop_break += sched_nr_migrate_break;
env->flags |= LBF_NEED_BREAK;
break;
}
if (!can_migrate_task(p, env))
goto next;
/*
 * Depending on the number of CPUs and tasks and the
* cgroup hierarchy, task_h_load() can return a null
* value. Make sure that env->imbalance decreases
* otherwise detach_tasks() will stop only after
* detaching up to loop_max tasks.
*/
load = max_t(unsigned long, task_h_load(p), 1);
if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
goto next;
if ((load / 2) > env->imbalance)
goto next;
detach_task(p, env);
list_add(&p->se.group_node, &env->tasks);
detached++;
env->imbalance -= load;
#ifdef CONFIG_PREEMPT
/*
* NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is detached to minimize
* the critical section.
*/
if (env->idle == CPU_NEWLY_IDLE)
break;
#endif
/*
* We only want to steal up to the prescribed amount of
* weighted load.
*/
if (env->imbalance <= 0)
break;
continue;
next:
trace_sched_load_balance_skip_tasks(env->src_cpu, env->dst_cpu,
env->src_grp_type, p->pid, load, task_util(p),
cpumask_bits(p->cpus_ptr)[0], env->flags);
list_move(&p->se.group_node, tasks);
}
if (env->flags & (LBF_IGNORE_BIG_TASKS |
LBF_IGNORE_PREFERRED_CLUSTER_TASKS) && !detached) {
tasks = &env->src_rq->cfs_tasks;
env->flags &= ~(LBF_IGNORE_BIG_TASKS |
LBF_IGNORE_PREFERRED_CLUSTER_TASKS);
env->loop = orig_loop;
goto redo;
}
/*
* Right now, this is one of only two places we collect this stat
* so we can safely collect detach_one_task() stats here rather
* than inside detach_one_task().
*/
schedstat_add(env->sd->lb_gained[env->idle], detached);
return detached;
}
/*
* attach_task() -- attach the task detached by detach_task() to its new rq.
*/
static void attach_task(struct rq *rq, struct task_struct *p)
{
lockdep_assert_held(&rq->lock);
BUG_ON(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK);
p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
}
/*
* attach_one_task() -- attaches the task returned from detach_one_task() to
* its new rq.
*/
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
struct rq_flags rf;
rq_lock(rq, &rf);
update_rq_clock(rq);
attach_task(rq, p);
update_overutilized_status(rq);
rq_unlock(rq, &rf);
}
/*
* attach_tasks() -- attaches all tasks detached by detach_tasks() to their
* new rq.
*/
static void attach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->tasks;
struct task_struct *p;
struct rq_flags rf;
rq_lock(env->dst_rq, &rf);
update_rq_clock(env->dst_rq);
while (!list_empty(tasks)) {
p = list_first_entry(tasks, struct task_struct, se.group_node);
list_del_init(&p->se.group_node);
attach_task(env->dst_rq, p);
}
/*
* The enqueue_task_fair only updates the overutilized status
* for the waking tasks. Since multiple tasks may get migrated
* from load balancer, instead of doing it there, update the
* overutilized status here at the end.
*/
update_overutilized_status(env->dst_rq);
rq_unlock(env->dst_rq, &rf);
}
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
if (cfs_rq->avg.load_avg)
return true;
if (cfs_rq->avg.util_avg)
return true;
return false;
}
static inline bool others_have_blocked(struct rq *rq)
{
if (READ_ONCE(rq->avg_rt.util_avg))
return true;
if (READ_ONCE(rq->avg_dl.util_avg))
return true;
if (thermal_load_avg(rq))
return true;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if (READ_ONCE(rq->avg_irq.util_avg))
return true;
#endif
return false;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
if (cfs_rq->load.weight)
return false;
if (cfs_rq->avg.load_sum)
return false;
if (cfs_rq->avg.util_sum)
return false;
if (cfs_rq->avg.runnable_load_sum)
return false;
return true;
}
static void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq, *pos;
const struct sched_class *curr_class;
struct rq_flags rf;
unsigned long thermal_pressure;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details.
*/
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
update_tg_load_avg(cfs_rq, 0);
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
update_load_avg(cfs_rq_of(se), se, 0);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
* decayed cfs_rqs linger on the list.
*/
if (cfs_rq_is_decayed(cfs_rq))
list_del_leaf_cfs_rq(cfs_rq);
}
curr_class = rq->curr->sched_class;
thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
#endif
rq_unlock_irqrestore(rq, &rf);
}
/*
* Compute the hierarchical load factor for cfs_rq and all its ascendants.
* This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
*/
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
unsigned long now = jiffies;
unsigned long load;
if (cfs_rq->last_h_load_update == now)
return;
WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
if (!se) {
cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
cfs_rq->last_h_load_update = now;
}
while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
cfs_rq = group_cfs_rq(se);
cfs_rq->h_load = load;
cfs_rq->last_h_load_update = now;
}
}
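/*
 * Worked example with illustrative numbers: if a task's se.avg.load_avg is
 * 512 on a group cfs_rq whose load_avg is 1024 and whose h_load resolves to
 * 2048, task_h_load() below returns 512 * 2048 / 1025 ~= 1023, i.e. the task
 * carries roughly half of the hierarchical load its group contributes
 * towards the root.
 */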
static unsigned long task_h_load(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_cfs_rq_h_load(cfs_rq);
return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
cfs_rq_load_avg(cfs_rq) + 1);
}
#else
static inline void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq = &rq->cfs;
const struct sched_class *curr_class;
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
curr_class = rq->curr->sched_class;
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
#endif
rq_unlock_irqrestore(rq, &rf);
}
static unsigned long task_h_load(struct task_struct *p)
{
return p->se.avg.load_avg;
}
#endif
/********** Helpers for find_busiest_group ************************/
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long load_per_task;
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
unsigned int sum_nr_running; /* Nr tasks running in the group */
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
int group_no_capacity;
/* A cpu has a task too big for its capacity */
unsigned long group_misfit_task_load;
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
#endif
};
/*
* sd_lb_stats - Structure to store the statistics of a sched_domain
* during load balancing.
*/
struct sd_lb_stats {
struct sched_group *busiest; /* Busiest group in this sd */
struct sched_group *local; /* Local group in this sd */
unsigned long total_running;
unsigned long total_load; /* Total load of all groups in sd */
unsigned long total_capacity; /* Total capacity of all groups in sd */
unsigned long total_util; /* Total util of all groups in sd */
unsigned long avg_load; /* Average load across all groups in sd */
struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
struct sg_lb_stats local_stat; /* Statistics of the local group */
};
static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
/*
* Skimp on the clearing to avoid duplicate work. We can avoid clearing
* local_stat because update_sg_lb_stats() does a full clear/assignment.
* We must however clear busiest_stat::avg_load because
* update_sd_pick_busiest() reads this before assignment.
*/
*sds = (struct sd_lb_stats){
.busiest = NULL,
.local = NULL,
.total_running = 0UL,
.total_load = 0UL,
.total_capacity = 0UL,
.total_util = 0UL,
.busiest_stat = {
.avg_load = 0UL,
.sum_nr_running = 0,
.group_type = group_other,
},
};
}
static unsigned long scale_rt_capacity(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long max = arch_scale_cpu_capacity(cpu);
unsigned long used, free;
unsigned long irq;
irq = cpu_util_irq(rq);
if (unlikely(irq >= max))
return 1;
used = READ_ONCE(rq->avg_rt.util_avg);
used += READ_ONCE(rq->avg_dl.util_avg);
if (unlikely(used >= max))
return 1;
free = max - used;
return scale_irq_capacity(free, irq, max);
}
void init_max_cpu_capacity(struct max_cpu_capacity *mcc)
{
raw_spin_lock_init(&mcc->lock);
mcc->val = 0;
mcc->cpu = -1;
}
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
unsigned long capacity = scale_rt_capacity(cpu);
struct sched_group *sdg = sd->groups;
struct max_cpu_capacity *mcc;
unsigned long max_capacity;
int max_cap_cpu;
unsigned long flags;
__maybe_unused bool update = false;
cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
capacity *= arch_scale_max_freq_capacity(sd, cpu);
capacity >>= SCHED_CAPACITY_SHIFT;
mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
raw_spin_lock_irqsave(&mcc->lock, flags);
max_capacity = mcc->val;
max_cap_cpu = mcc->cpu;
if ((max_capacity > capacity && max_cap_cpu == cpu) ||
max_capacity < capacity) {
mcc->val = capacity;
mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
raw_spin_unlock_irqrestore(&mcc->lock, flags);
printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
cpu, capacity);
goto skip_unlock;
#endif
}
raw_spin_unlock_irqrestore(&mcc->lock, flags);
skip_unlock: __attribute__ ((unused));
if (!capacity)
capacity = 1;
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = capacity;
sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
unsigned long capacity, min_capacity, max_capacity;
if (!child) {
update_cpu_capacity(sd, cpu);
return;
}
capacity = 0;
min_capacity = ULONG_MAX;
max_capacity = 0;
if (child->flags & SD_OVERLAP) {
/*
* SD_OVERLAP domains cannot assume that child groups
* span the current group.
*/
for_each_cpu(cpu, sched_group_span(sdg)) {
	unsigned long cpu_cap = capacity_of(cpu);

	/* Isolated CPUs contribute no capacity */
	if (cpumask_test_cpu(cpu, cpu_isolated_mask))
		continue;
	capacity += cpu_cap;
	min_capacity = min(cpu_cap, min_capacity);
	max_capacity = max(cpu_cap, max_capacity);
}
} else {
/*
* !SD_OVERLAP domains can assume that child groups
* span the current group.
*/
group = child->groups;
do {
struct sched_group_capacity *sgc = group->sgc;
cpumask_t *cpus = sched_group_span(group);
if (!cpu_isolated(cpumask_first(cpus))) {
capacity += sgc->capacity;
min_capacity = min(sgc->min_capacity,
min_capacity);
max_capacity = max(sgc->max_capacity,
max_capacity);
}
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = min_capacity;
sdg->sgc->max_capacity = max_capacity;
}
/*
* Check whether the capacity of the rq has been noticeably reduced by side
* activity. The imbalance_pct is used for the threshold.
 * Return true if the capacity is reduced.
*/
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
return ((rq->cpu_capacity * sd->imbalance_pct) <
(rq->cpu_capacity_orig * 100));
}
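/*
 * For example, assuming imbalance_pct = 125, the check above fires once
 * rq->cpu_capacity drops below 100/125 = 80% of cpu_capacity_orig, i.e. once
 * side activity (RT/DL/IRQ pressure) has claimed more than about 20% of the
 * CPU.
 */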
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
* groups is inadequate due to ->cpus_ptr constraints.
*
* Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
* cpumask covering 1 cpu of the first group and 3 cpus of the second group.
* Something like:
*
* { 0 1 2 3 } { 4 5 6 7 }
* * * * *
*
* If we were to balance group-wise we'd place two tasks in the first group and
* two tasks in the second group. Clearly this is undesired as it will overload
* cpu 3 and leave one of the cpus in the second group unused.
*
* The current solution to this issue is detecting the skew in the first group
* by noticing the lower domain failed to reach balance and had difficulty
* moving tasks due to affinity constraints.
*
* When this is so detected; this group becomes a candidate for busiest; see
* update_sd_pick_busiest(). And calculate_imbalance() and
* find_busiest_group() avoid some of the usual balance conditions to allow it
* to create an effective group imbalance.
*
* This is a somewhat tricky proposition since the next run might not find the
* group imbalance and decide the groups need to be balanced again. A most
* subtle and fragile situation.
*/
static inline int sg_imbalanced(struct sched_group *group)
{
return group->sgc->imbalance;
}
/*
* group_has_capacity returns true if the group has spare capacity that could
* be used by some tasks.
 * We consider that a group has spare capacity if the number of tasks is
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true if the available
 * capacity is meaningful for the load balancer.
 * As an example, an available capacity of 1% can appear but it doesn't bring
 * any benefit to the load balance.
*/
static inline bool
group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running < sgs->group_weight)
return true;
if ((sgs->group_capacity * 100) >
(sgs->group_util * env->sd->imbalance_pct))
return true;
return false;
}
/*
* group_is_overloaded returns true if the group has more tasks than it can
* handle.
 * group_is_overloaded is not equivalent to !group_has_capacity because a group
 * with the exact right number of tasks has no more spare capacity but is not
* overloaded so both group_has_capacity and group_is_overloaded return
* false.
*/
static inline bool
group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running <= sgs->group_weight)
return false;
#ifdef CONFIG_SCHED_WALT
if (env->idle != CPU_NOT_IDLE && walt_rotation_enabled)
return true;
#endif
if ((sgs->group_capacity * 100) <
(sgs->group_util * env->sd->imbalance_pct))
return true;
return false;
}
static inline enum
group_type group_classify(struct sched_group *group,
struct sg_lb_stats *sgs)
{
if (sgs->group_no_capacity)
return group_overloaded;
if (sg_imbalanced(group))
return group_imbalanced;
if (sgs->group_misfit_task_load)
return group_misfit_task;
return group_other;
}
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
* @group: sched_group whose statistics are to be updated.
* @sgs: variable to hold the statistics for this group.
* @sg_status: Holds flag indicating the status of the sched_group
 * @overutilized: Set to true if any CPU in the group is overutilized.
 * @misfit_task: Set to true if an overutilized CPU is also running a misfit task.
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group,
struct sg_lb_stats *sgs,
int *sg_status, bool *overutilized, bool *misfit_task)
{
int i, nr_running;
memset(sgs, 0, sizeof(*sgs));
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
struct rq *rq = cpu_rq(i);
if (cpu_isolated(i))
continue;
sgs->group_load += weighted_cpuload(rq);
sgs->group_util += cpu_util(i);
nr_running = rq->nr_running;
sgs->sum_nr_running += nr_running;
if (nr_running > 1)
*sg_status |= SG_OVERLOAD;
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
sgs->sum_weighted_load += weighted_cpuload(rq);
/*
* No need to call idle_cpu() if nr_running is not 0
*/
if (!nr_running && idle_cpu(i))
sgs->idle_cpus++;
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
sgs->group_misfit_task_load < rq->misfit_task_load) {
sgs->group_misfit_task_load = rq->misfit_task_load;
*sg_status |= SG_OVERLOAD;
}
if (cpu_overutilized(i)) {
*overutilized = true;
if (rq->misfit_task_load)
*misfit_task = true;
}
}
/* Isolated CPU has no weight */
if (!group->group_weight) {
sgs->group_capacity = 0;
sgs->avg_load = 0;
sgs->group_no_capacity = 1;
sgs->group_type = group_other;
sgs->group_weight = group->group_weight;
} else {
/* Adjust by relative CPU capacity of the group */
sgs->group_capacity = group->sgc->capacity;
sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) /
sgs->group_capacity;
sgs->group_weight = group->group_weight;
sgs->group_no_capacity = group_is_overloaded(env, sgs);
sgs->group_type = group_classify(group, sgs);
}
if (sgs->sum_nr_running)
sgs->load_per_task = sgs->sum_weighted_load /
sgs->sum_nr_running;
}
/**
* update_sd_pick_busiest - return 1 on busiest group
* @env: The load balancing environment.
* @sds: sched_domain statistics
* @sg: sched_group candidate to be checked for being the busiest
* @sgs: sched_group statistics
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
*
* Return: %true if @sg is a busier group than the previously selected
* busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
struct sched_group *sg,
struct sg_lb_stats *sgs)
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
/*
* Don't try to pull misfit tasks we can't help.
* We can use max_capacity here as reduction in capacity on some
* cpus in the group should either be possible to resolve
* internally or be covered by avg_load imbalance (eventually).
*/
if (sgs->group_type == group_misfit_task &&
(!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
!group_has_capacity(env, &sds->local_stat)))
return false;
if (sgs->group_type > busiest->group_type)
return true;
if (sgs->group_type < busiest->group_type)
return false;
if (sgs->avg_load <= busiest->avg_load)
return false;
if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
goto asym_packing;
/*
* If we have more than one misfit sg go with the biggest misfit.
*/
if (sgs->group_type == group_misfit_task &&
sgs->group_misfit_task_load < busiest->group_misfit_task_load)
return false;
asym_packing:
/* This is the busiest node in its class. */
if (!(env->sd->flags & SD_ASYM_PACKING))
return true;
/* No ASYM_PACKING if target cpu is already busy */
if (env->idle == CPU_NOT_IDLE)
return true;
/*
* ASYM_PACKING needs to move all the work to the highest
 * priority CPUs in the group, therefore mark all groups
* of lower priority than ourself as busy.
*/
if (sgs->sum_nr_running &&
sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
if (!sds->busiest)
return true;
/* Prefer to move from lowest priority cpu's work */
if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
sg->asym_prefer_cpu))
return true;
}
return false;
}
#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
if (sgs->sum_nr_running > sgs->nr_numa_running)
return regular;
if (sgs->sum_nr_running > sgs->nr_preferred_running)
return remote;
return all;
}
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
if (rq->nr_running > rq->nr_numa_running)
return regular;
if (rq->nr_running > rq->nr_preferred_running)
return remote;
return all;
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
return all;
}
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
return regular;
}
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_NO_HZ_COMMON
static struct {
cpumask_var_t idle_cpus_mask;
atomic_t nr_cpus;
unsigned long next_balance; /* in jiffy units */
unsigned long next_update; /* in jiffy units */
} nohz ____cacheline_aligned;
#endif
#define lb_sd_parent(sd) \
(sd->parent && sd->parent->groups != sd->parent->groups->next)
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
bool overutilized = false, misfit_task = false;
bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
int sg_status = 0;
#ifdef CONFIG_NO_HZ_COMMON
if (env->idle == CPU_NEWLY_IDLE) {
int cpu;
/* Update the stats of NOHZ idle CPUs in the sd */
for_each_cpu_and(cpu, sched_domain_span(env->sd),
nohz.idle_cpus_mask) {
struct rq *rq = cpu_rq(cpu);
/* ... Unless we've already done since the last tick */
if (time_after(jiffies,
rq->last_blocked_load_update_tick))
update_blocked_averages(cpu);
}
}
/*
* If we've just updated all of the NOHZ idle CPUs, then we can push
* back the next nohz.next_update, which will prevent an unnecessary
* wakeup for the nohz stats kick
*/
if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
nohz.next_update = jiffies + LOAD_AVG_PERIOD;
#endif
do {
struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
if (local_group) {
sds->local = sg;
sgs = local;
update_group_capacity(env->sd, env->dst_cpu);
}
update_sg_lb_stats(env, sg, sgs,
&sg_status, &overutilized,
&misfit_task);
if (local_group)
goto next_group;
/*
* In case the child domain prefers tasks go to siblings
* first, lower the sg capacity so that we'll try
* and move all the excess tasks away. We lower the capacity
* of a group only if the local group has the capacity to fit
* these excess tasks. The extra check prevents the case where
* you always pull from the heaviest group when it is already
* under-utilized (possible with a large weight task outweighs
* the tasks on the system).
*/
if (prefer_sibling && sds->local &&
group_has_capacity(env, local) &&
(sgs->sum_nr_running > local->sum_nr_running + 1)) {
sgs->group_no_capacity = 1;
sgs->group_type = group_classify(sg, sgs);
}
/*
* Disallow moving tasks from asym cap sibling CPUs to other
* CPUs (lower capacity) unless the asym cap sibling group has
* no capacity to manage the current load.
*/
if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
sgs->group_no_capacity &&
asym_cap_sibling_group_has_capacity(env->dst_cpu,
env->sd->imbalance_pct)) {
sgs->group_no_capacity = 0;
sgs->group_type = group_classify(sg, sgs);
}
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
}
next_group:
/* Now, start updating sd_lb_stats */
sds->total_running += sgs->sum_nr_running;
sds->total_load += sgs->group_load;
sds->total_capacity += sgs->group_capacity;
sds->total_util += sgs->group_util;
trace_sched_load_balance_sg_stats(sg->cpumask[0], sgs->group_type,
sgs->idle_cpus, sgs->sum_nr_running,
sgs->group_load, sgs->group_capacity,
sgs->group_util, sgs->group_no_capacity,
sgs->load_per_task,
sgs->group_misfit_task_load,
sds->busiest ? sds->busiest->cpumask[0] : 0);
sg = sg->next;
} while (sg != env->sd->groups);
if (env->sd->flags & SD_NUMA)
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
if (!lb_sd_parent(env->sd)) {
/* update overload indicator if we are at root domain */
WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
}
if (overutilized)
set_sd_overutilized(env->sd);
else
clear_sd_overutilized(env->sd);
/*
* If there is a misfit task in one cpu in this sched_domain
* it is likely that the imbalance cannot be sorted out among
 * the cpus in this sched_domain. In this case set the
* overutilized flag at the parent sched_domain.
*/
if (misfit_task) {
struct sched_domain *sd = env->sd->parent;
/*
* In case of a misfit task, load balance at the parent
 * sched domain level will make sense only if the cpus
 * have a different capacity. If cpus at a domain level have
 * the same capacity, the misfit task cannot be well
 * accommodated in any of the cpus and there is no point in
 * trying a load balance at this level.
*/
while (sd) {
if (sd->flags & SD_ASYM_CPUCAPACITY) {
set_sd_overutilized(sd);
break;
}
sd = sd->parent;
}
}
/*
 * If the domain util is greater than the domain capacity, load balancing
* needs to be done at the next sched domain level as well.
*/
if (lb_sd_parent(env->sd) &&
sds->total_capacity * 1024 < sds->total_util *
sched_capacity_margin_up[group_first_cpu(sds->local)])
set_sd_overutilized(env->sd->parent);
}
/**
* check_asym_packing - Check to see if the group is packed into the
* sched domain.
*
 * This is primarily intended to be used at the sibling level. Some
* cores like POWER7 prefer to use lower numbered SMT threads. In the
* case of POWER7, it can move to lower SMT modes only when higher
* threads are idle. When in lower SMT modes, the threads will
* perform better since they share less core resources. Hence when we
* have idle threads, we want them to be the higher ones.
*
* This packing function is run on idle threads. It checks to see if
* the busiest CPU in this domain (core in the P7 case) has a higher
* CPU number than the packing function is being run on. Here we are
 * assuming a lower CPU number will be equivalent to a lower SMT thread
* number.
*
* Return: 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in env->imbalance.
*
* @env: The load balancing environment.
* @sds: Statistics of the sched_domain which is to be packed
*/
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
{
int busiest_cpu;
if (!(env->sd->flags & SD_ASYM_PACKING))
return 0;
if (env->idle == CPU_NOT_IDLE)
return 0;
if (!sds->busiest)
return 0;
busiest_cpu = sds->busiest->asym_prefer_cpu;
if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
return 0;
env->imbalance = DIV_ROUND_CLOSEST(
sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
SCHED_CAPACITY_SCALE);
return 1;
}
/**
* fix_small_imbalance - Calculate the minor imbalance that exists
* amongst the groups of a sched_domain, during
* load balancing.
* @env: The load balancing environment.
* @sds: Statistics of the sched_domain whose imbalance is to be calculated.
*/
static inline
void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
unsigned long tmp, capa_now = 0, capa_move = 0;
unsigned int imbn = 2;
unsigned long scaled_busy_load_per_task;
struct sg_lb_stats *local, *busiest;
local = &sds->local_stat;
busiest = &sds->busiest_stat;
if (!local->sum_nr_running)
local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
else if (busiest->load_per_task > local->load_per_task)
imbn = 1;
scaled_busy_load_per_task =
(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
busiest->group_capacity;
if (busiest->avg_load + scaled_busy_load_per_task >=
local->avg_load + (scaled_busy_load_per_task * imbn)) {
env->imbalance = busiest->load_per_task;
return;
}
/*
* OK, we don't have enough imbalance to justify moving tasks,
* however we may be able to increase total CPU capacity used by
* moving them.
*/
capa_now += busiest->group_capacity *
min(busiest->load_per_task, busiest->avg_load);
capa_now += local->group_capacity *
min(local->load_per_task, local->avg_load);
capa_now /= SCHED_CAPACITY_SCALE;
/* Amount of load we'd subtract */
if (busiest->avg_load > scaled_busy_load_per_task) {
capa_move += busiest->group_capacity *
min(busiest->load_per_task,
busiest->avg_load - scaled_busy_load_per_task);
}
/* Amount of load we'd add */
if (busiest->avg_load * busiest->group_capacity <
busiest->load_per_task * SCHED_CAPACITY_SCALE) {
tmp = (busiest->avg_load * busiest->group_capacity) /
local->group_capacity;
} else {
tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
local->group_capacity;
}
capa_move += local->group_capacity *
min(local->load_per_task, local->avg_load + tmp);
capa_move /= SCHED_CAPACITY_SCALE;
/* Move if we gain throughput */
if (capa_move > capa_now) {
env->imbalance = busiest->load_per_task;
return;
}
/* We can't see throughput improvement with the load-based
* method, but it is possible depending upon group size and
* capacity range that there might still be an underutilized
* cpu available in an asymmetric capacity system. Do one last
* check just in case.
*/
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
busiest->group_type == group_overloaded &&
busiest->sum_nr_running > busiest->group_weight &&
local->sum_nr_running < local->group_weight &&
local->group_capacity < busiest->group_capacity)
env->imbalance = busiest->load_per_task;
}
/**
* calculate_imbalance - Calculate the amount of imbalance present within the
* groups of a given sched_domain during load balance.
* @env: load balance environment
* @sds: statistics of the sched_domain whose imbalance is to be calculated.
*/
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
unsigned long max_pull, load_above_capacity = ~0UL;
struct sg_lb_stats *local, *busiest;
bool no_imbalance = false;
local = &sds->local_stat;
busiest = &sds->busiest_stat;
if (busiest->group_type == group_imbalanced) {
/*
* In the group_imb case we cannot rely on group-wide averages
* to ensure cpu-load equilibrium, look at wider averages. XXX
*/
busiest->load_per_task =
min(busiest->load_per_task, sds->avg_load);
}
/*
* Avg load of busiest sg can be less and avg load of local sg can
* be greater than avg load across all sgs of sd because avg load
* factors in sg capacity and sgs with smaller group_type are
* skipped when updating the busiest sg:
*/
if (busiest->avg_load <= sds->avg_load ||
local->avg_load >= sds->avg_load)
no_imbalance = true;
if (busiest->group_type != group_misfit_task && no_imbalance) {
env->imbalance = 0;
if (busiest->group_type == group_overloaded &&
local->group_type <= group_misfit_task) {
env->imbalance = busiest->load_per_task;
return;
}
return fix_small_imbalance(env, sds);
}
/*
* If there aren't any idle cpus, avoid creating some.
*/
if (busiest->group_type == group_overloaded &&
local->group_type == group_overloaded) {
load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
if (load_above_capacity > busiest->group_capacity) {
load_above_capacity -= busiest->group_capacity;
load_above_capacity *= scale_load_down(NICE_0_LOAD);
load_above_capacity /= busiest->group_capacity;
} else
load_above_capacity = ~0UL;
}
/*
* In case of a misfit task, independent of avg loads we do load balance
 * at the parent sched domain level for big.LITTLE systems, so it is possible
* that busiest group avg load can be less than sd avg load.
* So skip calculating load based imbalance between groups.
*/
if (!no_imbalance) {
/*
* We're trying to get all the cpus to the average_load,
* so we don't want to push ourselves above the average load,
* nor do we wish to reduce the max loaded cpu below the average
* load. At the same time, we also don't want to reduce the
* group load below the group capacity.
* Thus we look for the minimum possible imbalance.
*/
max_pull = min(busiest->avg_load - sds->avg_load,
load_above_capacity);
/* How much load to actually move to equalise the imbalance */
env->imbalance = min(max_pull * busiest->group_capacity,
(sds->avg_load - local->avg_load) *
local->group_capacity) /
SCHED_CAPACITY_SCALE;
} else {
/*
* Skipped load based imbalance calculations, but let's find
* imbalance based on busiest group type or fix small imbalance.
*/
env->imbalance = 0;
}
/* Boost imbalance to allow misfit task to be balanced.
* Always do this if we are doing a NEWLY_IDLE balance
* on the assumption that any tasks we have must not be
* long-running (and hence we cannot rely upon load).
* However if we are not idle, we should assume the tasks
* we have are longer running and not override load-based
* calculations above unless we are sure that the local
* group is underutilized.
*/
if (busiest->group_type == group_misfit_task &&
(env->idle == CPU_NEWLY_IDLE ||
local->sum_nr_running < local->group_weight)) {
env->imbalance = max_t(long, env->imbalance,
busiest->group_misfit_task_load);
}
/*
* if *imbalance is less than the average load per runnable task
* there is no guarantee that any tasks will be moved so we'll have
* a think about bumping its value to force at least one task to be
* moved
*/
if (env->imbalance < busiest->load_per_task) {
/*
* The busiest group is overloaded so it could use help
* from the other groups. If the local group has idle CPUs
* and it is not overloaded and has no imbalance with in
* the group, allow the load balance by bumping the
* imbalance.
*/
if (busiest->group_type == group_overloaded &&
local->group_type <= group_misfit_task &&
env->idle != CPU_NOT_IDLE) {
env->imbalance = busiest->load_per_task;
return;
}
return fix_small_imbalance(env, sds);
}
}
/******* find_busiest_group() helpers end here *********************/
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
* Also calculates the amount of weighted load which should be moved
* to restore balance.
*
* @env: The load balancing environment.
*
* Return: - The busiest group if imbalance exists.
*/
static struct sched_group *find_busiest_group(struct lb_env *env)
{
struct sg_lb_stats *local, *busiest;
struct sd_lb_stats sds;
init_sd_lb_stats(&sds);
/*
 * Compute the various statistics relevant for load balancing at
* this level.
*/
update_sd_lb_stats(env, &sds);
if (energy_aware() && !sd_overutilized(env->sd)) {
int cpu_local, cpu_busiest;
unsigned long capacity_local, capacity_busiest;
if (env->idle != CPU_NEWLY_IDLE)
goto out_balanced;
if (!sds.local || !sds.busiest)
goto out_balanced;
cpu_local = group_first_cpu(sds.local);
cpu_busiest = group_first_cpu(sds.busiest);
/* TODO: don't assume same cap cpus are in same domain */
capacity_local = capacity_orig_of(cpu_local);
capacity_busiest = capacity_orig_of(cpu_busiest);
if (capacity_local > capacity_busiest) {
goto out_balanced;
} else if (capacity_local == capacity_busiest) {
if (cpu_rq(cpu_busiest)->nr_running < 2)
goto out_balanced;
}
}
local = &sds.local_stat;
busiest = &sds.busiest_stat;
/* ASYM feature bypasses nice load balance check */
if (check_asym_packing(env, &sds))
return sds.busiest;
/* There is no busy sibling group to pull tasks from */
if (!sds.busiest || busiest->sum_nr_running == 0)
goto out_balanced;
/* XXX broken for overlapping NUMA groups */
sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
/ sds.total_capacity;
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
* isn't true due to cpus_allowed constraints and the like.
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
/*
* When dst_cpu is idle, prevent SMP nice and/or asymmetric group
* capacities from resulting in underutilization due to avg_load.
*/
if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
busiest->group_no_capacity)
goto force_balance;
/* Misfit tasks should be dealt with regardless of the avg load */
if (busiest->group_type == group_misfit_task)
goto force_balance;
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
*/
if (local->avg_load >= busiest->avg_load)
goto out_balanced;
/*
* Don't pull any tasks if this group is already above the domain
* average load.
*/
if (local->avg_load >= sds.avg_load)
goto out_balanced;
if (env->idle == CPU_IDLE) {
/*
* This cpu is idle. If the busiest group is not overloaded
* and there is no imbalance between this and busiest group
* wrt idle cpus, it is balanced. The imbalance becomes
 * significant if the diff is greater than 1, otherwise we
 * might end up just moving the imbalance to another group
*/
if ((busiest->group_type != group_overloaded) &&
(local->idle_cpus <= (busiest->idle_cpus + 1)))
goto out_balanced;
} else {
/*
* In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
* imbalance_pct to be conservative.
*/
if (100 * busiest->avg_load <=
env->sd->imbalance_pct * local->avg_load)
goto out_balanced;
}
force_balance:
/* Looks like there is an imbalance. Compute it */
env->src_grp_type = busiest->group_type;
calculate_imbalance(env, &sds);
trace_sched_load_balance_stats(sds.busiest->cpumask[0], busiest->group_type,
busiest->avg_load, busiest->load_per_task,
sds.local->cpumask[0], local->group_type,
local->avg_load, local->load_per_task,
sds.avg_load, env->imbalance);
return env->imbalance ? sds.busiest : NULL;
out_balanced:
env->imbalance = 0;
return NULL;
}
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *find_busiest_queue(struct lb_env *env,
struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
unsigned long capacity, wl;
enum fbq_type rt;
rq = cpu_rq(i);
rt = fbq_classify_rq(rq);
/*
* We classify groups/runqueues into three groups:
* - regular: there are !numa tasks
* - remote: there are numa tasks that run on the 'wrong' node
* - all: there is no distinction
*
* In order to avoid migrating ideally placed numa tasks,
* ignore those when there's better options.
*
* If we ignore the actual busiest queue to migrate another
* task, the next balance pass can still reduce the busiest
* queue by moving tasks around inside the node.
*
* If we cannot move enough load due to this classification
* the next pass will adjust the group classification and
* allow migration of more tasks.
*
* Both cases only affect the total convergence complexity.
*/
if (rt > env->fbq_type)
continue;
/*
* For ASYM_CPUCAPACITY domains with misfit tasks we simply
* seek the "biggest" misfit task.
*/
if (env->src_grp_type == group_misfit_task) {
if (rq->misfit_task_load > busiest_load) {
busiest_load = rq->misfit_task_load;
busiest = rq;
}
continue;
}
/*
* Ignore cpu, which is undergoing active_balance and doesn't
* have more than 2 tasks.
*/
if (rq->active_balance && rq->nr_running <= 2)
continue;
capacity = capacity_of(i);
/*
* For ASYM_CPUCAPACITY domains, don't pick a cpu that could
* eventually lead to active_balancing high->low capacity.
* Higher per-cpu capacity is considered better than balancing
* average load.
*/
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
!capacity_greater(capacity_of(env->dst_cpu), capacity) &&
rq->nr_running == 1)
continue;
wl = weighted_cpuload(rq);
/*
* When comparing with imbalance, use weighted_cpuload()
* which is not scaled with the cpu capacity.
*/
if (rq->nr_running == 1 && wl > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
continue;
/*
* For the load comparisons with the other cpu's, consider
* the weighted_cpuload() scaled with the cpu capacity, so
* that the load can be moved away from the cpu that is
* potentially running at a lower capacity.
*
* Thus we're looking for max(wl_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
* to: wl_i * capacity_j > wl_j * capacity_i; where j is
* our previous maximum.
*/
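/*
 * Worked example (illustrative values only): a candidate with
 * wl = 600 and capacity = 512 against a current maximum of
 * busiest_load = 800 on busiest_capacity = 1024 gives
 * 600 * 1024 = 614400 > 800 * 512 = 409600, so the lower-capacity
 * cpu is relatively busier and becomes the new busiest candidate.
 */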
if (wl * busiest_capacity > busiest_load * capacity) {
busiest_load = wl;
busiest_capacity = capacity;
busiest = rq;
}
}
return busiest;
}
/*
 * Max backoff if we encounter pinned tasks. A pretty arbitrary value, but
 * it works so long as it is large enough.
*/
#define MAX_PINNED_INTERVAL 512
#define NEED_ACTIVE_BALANCE_THRESHOLD 10
static inline bool
asym_active_balance(struct lb_env *env)
{
/*
* ASYM_PACKING needs to force migrate tasks from busy but
* lower priority CPUs in order to pack all tasks in the
* highest priority CPUs.
*/
return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
sched_asym_prefer(env->dst_cpu, env->src_cpu);
}
static inline bool
voluntary_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
if (asym_active_balance(env))
return 1;
if (env->src_grp_type == group_overloaded &&
env->src_rq->misfit_task_load)
return 1;
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
static int group_balance_cpu_not_isolated(struct sched_group *sg)
{
cpumask_t cpus;
cpumask_and(&cpus, sched_group_span(sg), group_balance_mask(sg));
cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
return cpumask_first(&cpus);
}
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
if (voluntary_active_balance(env))
return 1;
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
static int active_load_balance_cpu_stop(void *data);
static int should_we_balance(struct lb_env *env)
{
struct sched_group *sg = env->sd->groups;
int cpu, balance_cpu = -1;
/*
* Ensure the balancing environment is consistent; can happen
* when the softirq triggers 'during' hotplug.
*/
if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
return 0;
/*
* In the newly idle case, we will allow all the cpu's
* to do the newly idle load balance.
*
* However, we bail out if we already have tasks or a wakeup pending,
* to optimize wakeup latency.
*/
if (env->idle == CPU_NEWLY_IDLE) {
#if SCHED_FEAT_TTWU_QUEUE
if (env->dst_rq->nr_running > 0 || !llist_empty(&env->dst_rq->wake_list))
return 0;
#else
if (env->dst_rq->nr_running > 0)
return 0;
#endif
return 1;
}
/* Try to find first idle cpu */
for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
if (!idle_cpu(cpu) || cpu_isolated(cpu))
continue;
balance_cpu = cpu;
break;
}
if (balance_cpu == -1)
balance_cpu = group_balance_cpu_not_isolated(sg);
/*
* First idle cpu or the first cpu(busiest) in this sched group
* is eligible for doing load balancing at this and above domains.
*/
return balance_cpu == env->dst_cpu;
}
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
int *continue_balancing)
{
int ld_moved = 0, cur_ld_moved, active_balance = 0;
struct sched_domain *sd_parent = lb_sd_parent(sd) ? sd->parent : NULL;
struct sched_group *group = NULL;
struct rq *busiest = NULL;
struct rq_flags rf;
struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
struct lb_env env = {
.sd = sd,
.dst_cpu = this_cpu,
.dst_rq = this_rq,
.dst_grpmask = group_balance_mask(sd->groups),
.idle = idle,
.loop_break = sched_nr_migrate_break,
.cpus = cpus,
.fbq_type = all,
.tasks = LIST_HEAD_INIT(env.tasks),
.imbalance = 0,
.flags = 0,
.loop = 0,
};
cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
schedstat_inc(sd->lb_count[idle]);
redo:
if (!should_we_balance(&env)) {
*continue_balancing = 0;
goto out_balanced;
}
group = find_busiest_group(&env);
if (!group) {
schedstat_inc(sd->lb_nobusyg[idle]);
goto out_balanced;
}
busiest = find_busiest_queue(&env, group);
if (!busiest) {
schedstat_inc(sd->lb_nobusyq[idle]);
goto out_balanced;
}
BUG_ON(busiest == env.dst_rq);
schedstat_add(sd->lb_imbalance[idle], env.imbalance);
env.src_cpu = busiest->cpu;
env.src_rq = busiest;
ld_moved = 0;
/* Clear this flag as soon as we find a pullable task */
env.flags |= LBF_ALL_PINNED;
if (busiest->nr_running > 1) {
/*
* Attempt to move tasks. If find_busiest_group has found
* an imbalance but busiest->nr_running <= 1, the group is
* still unbalanced. ld_moved simply stays zero, so it is
* correctly treated as an imbalance.
*/
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
more_balance:
rq_lock_irqsave(busiest, &rf);
/*
* The world might have changed. Validate assumptions.
* And also, if the busiest cpu is undergoing active_balance,
* it doesn't need help if it has less than 2 tasks on it.
*/
if (busiest->nr_running <= 1 ||
(busiest->active_balance && busiest->nr_running <= 2)) {
rq_unlock_irqrestore(busiest, &rf);
env.flags &= ~LBF_ALL_PINNED;
goto no_move;
}
update_rq_clock(busiest);
/*
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
*/
cur_ld_moved = detach_tasks(&env);
/*
* We've detached some tasks from busiest_rq. Every
* task is masked "TASK_ON_RQ_MIGRATING", so we can safely
* unlock busiest->lock, and we are able to be sure
* that nobody can manipulate the tasks in parallel.
* See task_rq_lock() family for the details.
*/
rq_unlock(busiest, &rf);
if (cur_ld_moved) {
attach_tasks(&env);
ld_moved += cur_ld_moved;
}
local_irq_restore(rf.flags);
if (env.flags & LBF_NEED_BREAK) {
env.flags &= ~LBF_NEED_BREAK;
/* Stop if we tried all running tasks */
if (env.loop < busiest->nr_running)
goto more_balance;
}
/*
* Revisit (affine) tasks on src_cpu that couldn't be moved to
* us and move them to an alternate dst_cpu in our sched_group
* where they can run. The upper limit on how many times we
* iterate on same src_cpu is dependent on number of cpus in our
* sched_group.
*
* This changes load balance semantics a bit on who can move
* load to a given_cpu. In addition to the given_cpu itself
 * (or an ilb_cpu acting on its behalf where given_cpu is
 * nohz-idle), we now have balance_cpu in a position to move
 * load to given_cpu. In rare situations, this may cause
 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
 * _independently_ and at the _same_ time to move some load to
 * given_cpu), causing excess load to be moved to given_cpu.
 * This however should not happen much in practice and
* moreover subsequent load balance cycles should correct the
* excess load moved.
*/
if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
/* Prevent dst_cpu from being re-selected via env's cpus */
cpumask_clear_cpu(env.dst_cpu, env.cpus);
env.dst_rq = cpu_rq(env.new_dst_cpu);
env.dst_cpu = env.new_dst_cpu;
env.flags &= ~LBF_DST_PINNED;
env.loop = 0;
env.loop_break = sched_nr_migrate_break;
/*
* Go back to "more_balance" rather than "redo" since we
* need to continue with same src_cpu.
*/
goto more_balance;
}
/*
* We failed to reach balance because of affinity.
*/
if (sd_parent) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
*group_imbalance = 1;
}
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(env.flags & LBF_ALL_PINNED)) {
cpumask_clear_cpu(cpu_of(busiest), cpus);
/*
* Attempting to continue load balancing at the current
* sched_domain level only makes sense if there are
* active CPUs remaining as possible busiest CPUs to
* pull load from which are not contained within the
* destination group that is receiving any migrated
* load.
*/
if (!cpumask_subset(cpus, env.dst_grpmask)) {
env.loop = 0;
env.loop_break = sched_nr_migrate_break;
goto redo;
}
goto out_all_pinned;
}
}
no_move:
if (!ld_moved) {
/*
* Increment the failure counter only on periodic balance.
 * We do not want newidle balance, which can be very
 * frequent, to pollute the failure counter, causing
 * excessive cache_hot migrations and active balances.
*/
if (idle != CPU_NEWLY_IDLE) {
if (env.src_grp_nr_running > 1)
sd->nr_balance_failed++;
}
if (need_active_balance(&env)) {
unsigned long flags;
raw_spin_lock_irqsave(&busiest->lock, flags);
/*
* The CPUs are marked as reserved if tasks
* are pushed/pulled from other CPUs. In that case,
* bail out from the load balancer.
*/
if (is_reserved(this_cpu) ||
is_reserved(cpu_of(busiest))) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
*continue_balancing = 0;
goto out;
}
/* don't kick the active_load_balance_cpu_stop
 * if the curr task on the busiest cpu can't be
 * moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
goto out_one_pinned;
}
/* Record that we found at least one task that could run on this_cpu */
env.flags &= ~LBF_ALL_PINNED;
/*
* ->active_balance synchronizes accesses to
* ->active_balance_work. Once set, it's cleared
* only after active load balance is finished.
*/
if (!busiest->active_balance &&
!cpu_isolated(cpu_of(busiest))) {
busiest->active_balance = 1;
busiest->push_cpu = this_cpu;
active_balance = 1;
}
raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance) {
stop_one_cpu_nowait(cpu_of(busiest),
active_load_balance_cpu_stop, busiest,
&busiest->active_balance_work);
*continue_balancing = 0;
}
}
} else
sd->nr_balance_failed = 0;
if (likely(!active_balance) || voluntary_active_balance(&env)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
} else {
/*
* If we've begun active balancing, start to back off. This
* case may not be covered by the all_pinned logic if there
* is only 1 task on the busy runqueue (because we don't call
* detach_tasks).
*/
if (sd->balance_interval < sd->max_interval)
sd->balance_interval *= 2;
}
goto out;
out_balanced:
/*
* We reach balance although we may have faced some affinity
* constraints. Clear the imbalance flag only if other tasks got
* a chance to move and fix the imbalance.
*/
if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if (*group_imbalance)
*group_imbalance = 0;
}
out_all_pinned:
/*
* We reach balance because all tasks are pinned at this level so
 * we can't migrate them. Leave the imbalance flag set so the parent
 * level can try to migrate them.
*/
schedstat_inc(sd->lb_balanced[idle]);
sd->nr_balance_failed = 0;
out_one_pinned:
ld_moved = 0;
/*
* idle_balance() disregards balance intervals, so we could repeatedly
 * reach this code, which would lead to balance_interval skyrocketing
* in a short amount of time. Skip the balance_interval increase logic
* to avoid that.
*/
if (env.idle == CPU_NEWLY_IDLE)
goto out;
/* tune up the balancing interval */
if (((env.flags & LBF_ALL_PINNED) &&
sd->balance_interval < MAX_PINNED_INTERVAL) ||
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
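/*
 * Illustration (starting value hypothetical): with all tasks pinned the
 * interval backs off 8 -> 16 -> 32 -> ... on successive failed attempts,
 * doubling while it stays below MAX_PINNED_INTERVAL (512) or below
 * sd->max_interval, so repeated futile balance attempts become
 * progressively rarer.
 */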
out:
trace_sched_load_balance(this_cpu, idle, *continue_balancing,
group ? group->cpumask[0] : 0,
busiest ? busiest->nr_running : 0,
env.imbalance, env.flags, ld_moved,
sd->balance_interval, active_balance);
return ld_moved;
}
static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
unsigned long interval = sd->balance_interval;
unsigned int cpu;
if (cpu_busy)
interval *= sd->busy_factor;
/* scale ms to jiffies */
interval = msecs_to_jiffies(interval);
/*
* Reduce likelihood of busy balancing at higher domains racing with
* balancing at lower domains by preventing their balancing periods
* from being multiples of each other.
*/
if (cpu_busy)
interval -= 1;
interval = clamp(interval, 1UL, max_load_balance_interval);
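/*
 * Illustrative example (values hypothetical): with a balance_interval of
 * 64 ms and a busy_factor of 32, a busy cpu rebalances roughly every
 * 2048 ms; subtracting one jiffy keeps the busy period from being an
 * exact multiple of the idle period used at lower domains, and the
 * result is clamped to [1, max_load_balance_interval] jiffies.
 */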
/*
 * Check if the sched domain is marked as overutilized. We ought to
 * only do this on systems which have SD_ASYM_CPUCAPACITY, but we
 * want to do it for all sched domains in those systems, so for now
 * just check overutilization as a proxy.
*/
/*
* If we are overutilized and we have a misfit task, then
* we want to balance as soon as practically possible, so
* we return an interval of zero.
*/
if (energy_aware() && sd_overutilized(sd)) {
/* we know the root is overutilized, let's check for a misfit task */
for_each_cpu(cpu, sched_domain_span(sd)) {
if (cpu_rq(cpu)->misfit_task_load)
return 1;
}
}
return interval;
}
static inline void
update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
{
unsigned long interval, next;
/* used by idle balance, so cpu_busy = 0 */
interval = get_sd_balance_interval(sd, 0);
next = sd->last_balance + interval;
if (time_after(*next_balance, next))
*next_balance = next;
}
#ifdef CONFIG_SCHED_WALT
static inline bool min_cap_cluster_has_misfit_task(void)
{
int cpu;
for_each_possible_cpu(cpu) {
if (!is_min_capacity_cpu(cpu))
break;
if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
return true;
}
return false;
}
#else
static inline bool min_cap_cluster_has_misfit_task(void)
{
return false;
}
#endif
/*
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*/
static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
{
unsigned long next_balance = jiffies + HZ;
int this_cpu = this_rq->cpu;
struct sched_domain *sd;
int pulled_task = 0;
u64 curr_cost = 0;
bool force_lb = false;
if (cpu_isolated(this_cpu))
return 0;
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
*/
this_rq->idle_stamp = rq_clock(this_rq);
/*
* Do not pull tasks towards !active CPUs...
*/
if (!cpu_active(this_cpu))
return 0;
/*
 * Force higher-capacity CPUs to do load balancing when the
 * lower-capacity CPUs have some misfit tasks.
*/
if (!is_min_capacity_cpu(this_cpu) && min_cap_cluster_has_misfit_task())
force_lb = true;
/*
* This is OK, because current is on_cpu, which avoids it being picked
* for load-balance and preemption/IRQs are still disabled avoiding
* further scheduler activity on it and we're being very careful to
* re-start the picking loop.
*/
rq_unpin_lock(this_rq, rf);
if (!force_lb && (this_rq->avg_idle < sysctl_sched_migration_cost ||
!READ_ONCE(this_rq->rd->overload))) {
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(this_rq->sd);
if (sd)
update_next_balance(sd, &next_balance);
rcu_read_unlock();
goto out;
}
raw_spin_unlock(&this_rq->lock);
update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
int continue_balancing = 1;
u64 t0, domain_cost;
if (!(sd->flags & SD_LOAD_BALANCE)) {
update_group_capacity(sd, this_cpu);
continue;
}
if (!force_lb &&
this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
update_next_balance(sd, &next_balance);
break;
}
if (sd->flags & SD_BALANCE_NEWIDLE) {
t0 = sched_clock_cpu(this_cpu);
pulled_task = load_balance(this_cpu, this_rq,
sd, CPU_NEWLY_IDLE,
&continue_balancing);
domain_cost = sched_clock_cpu(this_cpu) - t0;
if (domain_cost > sd->max_newidle_lb_cost)
sd->max_newidle_lb_cost = domain_cost;
curr_cost += domain_cost;
}
update_next_balance(sd, &next_balance);
/*
* Stop searching for tasks to pull if there are
* now runnable tasks on this rq.
*/
if (pulled_task || this_rq->nr_running > 0)
break;
}
rcu_read_unlock();
raw_spin_lock(&this_rq->lock);
if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost;
out:
/*
 * While browsing the domains we released the rq lock, so a task could
* have been enqueued in the meantime. Since we're not going idle,
* pretend we pulled a task.
*/
if (this_rq->cfs.h_nr_running && !pulled_task)
pulled_task = 1;
/* Move the next balance forward */
if (time_after(this_rq->next_balance, next_balance))
this_rq->next_balance = next_balance;
/* Is there a task of a high priority class? */
if (this_rq->nr_running != this_rq->cfs.h_nr_running)
pulled_task = -1;
if (pulled_task)
this_rq->idle_stamp = 0;
rq_repin_lock(this_rq, rf);
return pulled_task;
}
/*
* active_load_balance_cpu_stop is run by cpu stopper. It pushes
* running tasks off the busiest CPU onto idle CPUs. It requires at
* least 1 task to be running on each physical CPU where possible, and
* avoids physical / logical imbalances.
*/
static int active_load_balance_cpu_stop(void *data)
{
struct rq *busiest_rq = data;
int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
struct sched_domain *sd = NULL;
struct task_struct *p = NULL;
struct rq_flags rf;
struct task_struct *push_task;
int push_task_detached = 0;
struct lb_env env = {
.sd = sd,
.dst_cpu = target_cpu,
.dst_rq = target_rq,
.src_cpu = busiest_rq->cpu,
.src_rq = busiest_rq,
.idle = CPU_IDLE,
.flags = 0,
.loop = 0,
};
bool moved = false;
rq_lock_irq(busiest_rq, &rf);
/*
* Between queueing the stop-work and running it is a hole in which
* CPUs can become inactive. We should not move tasks from or to
* inactive CPUs.
*/
if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
goto out_unlock;
/* make sure the requested cpu hasn't gone down in the meantime */
if (unlikely(busiest_cpu != smp_processor_id() ||
!busiest_rq->active_balance))
goto out_unlock;
/* Is there any task to move? */
if (busiest_rq->nr_running <= 1)
goto out_unlock;
/*
* This condition is "impossible", if it occurs
* we need to fix it. Originally reported by
* Bjorn Helgaas on a 128-cpu setup.
*/
BUG_ON(busiest_rq == target_rq);
push_task = busiest_rq->push_task;
target_cpu = busiest_rq->push_cpu;
if (push_task) {
if (task_on_rq_queued(push_task) &&
push_task->state == TASK_RUNNING &&
task_cpu(push_task) == busiest_cpu &&
cpu_online(target_cpu)) {
update_rq_clock(busiest_rq);
detach_task(push_task, &env);
push_task_detached = 1;
moved = true;
}
goto out_unlock;
}
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
if ((sd->flags & SD_LOAD_BALANCE) &&
cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
break;
}
if (likely(sd)) {
struct lb_env env = {
.sd = sd,
.dst_cpu = target_cpu,
.dst_rq = target_rq,
.src_cpu = busiest_rq->cpu,
.src_rq = busiest_rq,
.idle = CPU_IDLE,
.flags = LBF_ACTIVE_LB,
};
schedstat_inc(sd->alb_count);
update_rq_clock(busiest_rq);
p = detach_one_task(&env);
if (p) {
schedstat_inc(sd->alb_pushed);
/* Active balancing done, reset the failure counter. */
sd->nr_balance_failed = 0;
moved = true;
} else {
schedstat_inc(sd->alb_failed);
}
}
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
push_task = busiest_rq->push_task;
target_cpu = busiest_rq->push_cpu;
if (push_task)
busiest_rq->push_task = NULL;
rq_unlock(busiest_rq, &rf);
if (push_task) {
if (push_task_detached)
attach_one_task(target_rq, push_task);
put_task_struct(push_task);
clear_reserved(target_cpu);
}
if (p)
attach_one_task(target_rq, p);
local_irq_enable();
return 0;
}
static inline int on_null_domain(struct rq *rq)
{
return unlikely(!rcu_dereference_sched(rq->sd));
}
#ifdef CONFIG_NO_HZ_COMMON
/*
* idle load balancing details
 * - When one of the busy CPUs notices that there may be an idle rebalancing
 *   needed, it will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
static inline int find_new_ilb(void)
{
int ilb = nr_cpu_ids;
struct sched_domain *sd;
int cpu = raw_smp_processor_id();
struct rq *rq = cpu_rq(cpu);
cpumask_t cpumask;
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(rq->sd);
if (sd) {
cpumask_and(&cpumask, nohz.idle_cpus_mask,
sched_domain_span(sd));
cpumask_andnot(&cpumask, &cpumask,
cpu_isolated_mask);
ilb = cpumask_first(&cpumask);
}
rcu_read_unlock();
if (sd && (ilb >= nr_cpu_ids || !idle_cpu(ilb))) {
if (!energy_aware() ||
(capacity_orig_of(cpu) ==
cpu_rq(cpu)->rd->max_cpu_capacity.val ||
cpu_overutilized(cpu))) {
cpumask_andnot(&cpumask, nohz.idle_cpus_mask,
cpu_isolated_mask);
ilb = cpumask_first(&cpumask);
}
}
if (ilb < nr_cpu_ids && idle_cpu(ilb))
return ilb;
return nr_cpu_ids;
}
/*
* Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
* CPU (if there is one).
*/
static void nohz_balancer_kick(bool only_update)
{
int ilb_cpu;
nohz.next_balance++;
ilb_cpu = find_new_ilb();
if (ilb_cpu >= nr_cpu_ids)
return;
if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
return;
if (only_update)
set_bit(NOHZ_STATS_KICK, nohz_flags(ilb_cpu));
/*
* Use smp_send_reschedule() instead of resched_cpu().
* This way we generate a sched IPI on the target cpu which
* is idle. And the softirq performing nohz idle load balance
* will be run before returning from the IPI.
*/
trace_sched_load_balance_nohz_kick(smp_processor_id(), ilb_cpu);
smp_send_reschedule(ilb_cpu);
return;
}
void nohz_balance_exit_idle(unsigned int cpu)
{
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
/*
 * Completely isolated CPUs never set nohz.idle_cpus_mask, so we must test it.
*/
if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
atomic_dec(&nohz.nr_cpus);
}
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
}
static inline void set_cpu_sd_state_busy(void)
{
struct sched_domain *sd;
int cpu = smp_processor_id();
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_llc, cpu));
if (!sd || !sd->nohz_idle)
goto unlock;
sd->nohz_idle = 0;
atomic_inc(&sd->shared->nr_busy_cpus);
unlock:
rcu_read_unlock();
}
void set_cpu_sd_state_idle(void)
{
struct sched_domain *sd;
int cpu = smp_processor_id();
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_llc, cpu));
if (!sd || sd->nohz_idle)
goto unlock;
sd->nohz_idle = 1;
atomic_dec(&sd->shared->nr_busy_cpus);
unlock:
rcu_read_unlock();
}
/*
* This routine will record that the cpu is going idle with tick stopped.
* This info will be used in performing idle load balancing in the future.
*/
void nohz_balance_enter_idle(int cpu)
{
if (!cpu_active(cpu)) {
/*
 * A CPU can be paused while it is idle with its tick
 * stopped. nohz_balance_exit_idle() should be called
 * from the local CPU, so it can't be called during
 * pause. This results in the paused CPU participating in
* the nohz idle balance, which should be avoided.
* When the paused CPU exits idle and enters again,
* exempt the paused CPU from nohz_balance_exit_idle.
*/
nohz_balance_exit_idle(cpu);
return;
}
/* Spare idle load balancing on CPUs that don't want to be disturbed: */
if (!is_housekeeping_cpu(cpu))
return;
if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
return;
/*
* If we're a completely isolated CPU, we don't play.
*/
if (on_null_domain(cpu_rq(cpu)) || cpu_isolated(cpu))
return;
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus);
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
#else
static inline void nohz_balancer_kick(bool only_update) {}
#endif
static DEFINE_SPINLOCK(balancing);
/*
* Scale the max load_balance interval with the number of CPUs in the system.
* This trades load-balance latency on larger machines for less cross talk.
*/
void update_max_interval(void)
{
cpumask_t avail_mask;
unsigned int available_cpus;
cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask);
available_cpus = cpumask_weight(&avail_mask);
max_load_balance_interval = HZ*available_cpus/10;
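	/*
	 * For illustration only: with HZ = 250 and 8 available (online,
	 * non-isolated) cpus, max_load_balance_interval = 250 * 8 / 10 =
	 * 200 jiffies, i.e. 800 ms. The actual value tracks the current
	 * cpu count.
	 */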
}
/*
* It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so.
*
* Balancing parameters are set up in init_sched_domains.
*/
static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
int continue_balancing = 1;
int cpu = rq->cpu;
int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
unsigned long interval;
struct sched_domain *sd;
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
int need_serialize, need_decay = 0;
u64 max_cost = 0;
rcu_read_lock();
for_each_domain(cpu, sd) {
/*
* Decay the newidle max times here because this is a regular
* visit to all the domains. Decay ~1% per second.
*/
if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
sd->max_newidle_lb_cost =
(sd->max_newidle_lb_cost * 253) / 256;
sd->next_decay_max_lb_cost = jiffies + HZ;
need_decay = 1;
}
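/*
 * Note on the arithmetic above: 253/256 removes roughly 1.2% of the
 * value each time it is applied, and it is applied about once per
 * second (next_decay_max_lb_cost = jiffies + HZ), which matches the
 * "~1% per second" decay described above.
 */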
max_cost += sd->max_newidle_lb_cost;
if (energy_aware() && !sd_overutilized(sd))
continue;
if (!(sd->flags & SD_LOAD_BALANCE)) {
update_group_capacity(sd, cpu);
continue;
}
/*
* Stop the load balance at this level. There is another
* CPU in our sched group which is doing load balancing more
* actively.
*/
if (!continue_balancing) {
if (need_decay)
continue;
break;
}
interval = get_sd_balance_interval(sd, busy);
need_serialize = sd->flags & SD_SERIALIZE;
if (need_serialize) {
if (!spin_trylock(&balancing))
goto out;
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
* The LBF_DST_PINNED logic could have changed
* env->dst_cpu, so we can't know our idle
* state even if we migrated tasks. Update it.
*/
idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
}
sd->last_balance = jiffies;
interval = get_sd_balance_interval(sd, busy);
}
if (need_serialize)
spin_unlock(&balancing);
out:
if (time_after(next_balance, sd->last_balance + interval)) {
next_balance = sd->last_balance + interval;
update_next_balance = 1;
}
}
if (need_decay) {
/*
* Ensure the rq-wide value also decays but keep it at a
* reasonable floor to avoid funnies with rq->avg_idle.
*/
rq->max_idle_balance_cost =
max((u64)sysctl_sched_migration_cost, max_cost);
}
rcu_read_unlock();
/*
* next_balance will be updated only when there is a need.
 * When the cpu is attached to a null domain, for example, it will not be
* updated.
*/
if (likely(update_next_balance)) {
rq->next_balance = next_balance;
#ifdef CONFIG_NO_HZ_COMMON
/*
 * If this CPU has been elected to perform the nohz idle
 * balance, the other idle CPUs have already rebalanced with
 * nohz_idle_balance() and nohz.next_balance has been
 * updated accordingly. This CPU is now running the idle load
 * balance for itself and we need to update
 * nohz.next_balance accordingly.
*/
if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
nohz.next_balance = rq->next_balance;
#endif
}
}
#ifdef CONFIG_NO_HZ_COMMON
/*
* In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
* rebalancing for all the cpus for whom scheduler ticks are stopped.
*/
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
int this_cpu = this_rq->cpu;
struct rq *rq;
struct sched_domain *sd;
int balance_cpu;
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
cpumask_t cpus;
if (idle != CPU_IDLE ||
!test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
goto end;
/*
* This cpu is going to update the blocked load of idle CPUs either
 * before doing a rebalancing or just to keep metrics up to date, so
 * we can safely update the next update timestamp.
*/
rcu_read_lock();
sd = rcu_dereference(this_rq->sd);
/*
* Check whether there is a sched_domain available for this cpu.
 * The last other cpu may have been unplugged since the ILB was
 * triggered and the sched_domain can now be null. The idle balance
 * sequence will quickly be aborted as there are no more idle CPUs.
*/
if (sd)
nohz.next_update = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
rcu_read_unlock();
cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
for_each_cpu(balance_cpu, &cpus) {
if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
continue;
/*
* If this cpu gets work to do, stop the load balancing
* work being done for other cpus. Next load
* balancing owner will pick it up.
*/
if (need_resched())
break;
rq = cpu_rq(balance_cpu);
/*
* If time for next balance is due,
* do the balance.
*/
if (time_after_eq(jiffies, rq->next_balance)) {
struct rq_flags rf;
rq_lock_irq(rq, &rf);
update_rq_clock(rq);
rq_unlock_irq(rq, &rf);
update_blocked_averages(balance_cpu);
/*
* This idle load balance softirq may have been
* triggered only to update the blocked load and shares
* of idle CPUs (which we have just done for
* balance_cpu). In that case skip the actual balance.
*/
if (!test_bit(NOHZ_STATS_KICK, nohz_flags(this_cpu)))
rebalance_domains(rq, idle);
}
if (time_after(next_balance, rq->next_balance)) {
next_balance = rq->next_balance;
update_next_balance = 1;
}
}
/*
* next_balance will be updated only when there is a need.
 * When the CPU is attached to a null domain, for example, it will not be
* updated.
*/
if (likely(update_next_balance))
nohz.next_balance = next_balance;
end:
clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
/*
* Current heuristic for kicking the idle load balancer in the presence
* of an idle cpu in the system.
* - This rq has more than one task.
* - This rq has at least one CFS task and the capacity of the CPU is
* significantly reduced because of RT tasks or IRQs.
 * - At the parent of the LLC scheduler domain level, this cpu's scheduler
 *   group has multiple busy cpus.
 * - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
 *   domain span are idle.
*/
static inline bool nohz_kick_needed(struct rq *rq, bool only_update)
{
unsigned long now = jiffies;
struct sched_domain_shared *sds;
struct sched_domain *sd;
int nr_busy, i, cpu = rq->cpu;
bool kick = false;
cpumask_t cpumask;
if (unlikely(rq->idle_balance) && !only_update)
return false;
/*
 * We may have recently been in ticked or tickless idle mode. At the first
* busy tick after returning from idle, we will update the busy stats.
*/
set_cpu_sd_state_busy();
nohz_balance_exit_idle(cpu);
/*
* None are in tickless mode and hence no need for NOHZ idle load
* balancing.
*/
cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
if (cpumask_empty(&cpumask))
return false;
if (only_update) {
if (time_before(now, nohz.next_update))
return false;
else
return true;
}
if (time_before(now, nohz.next_balance))
return false;
/*
 * If energy aware is enabled, do an idle load balance if the runqueue
 * has at least 2 tasks and the cpu is overutilized.
*/
if (rq->nr_running >= 2 &&
(!energy_aware() || cpu_overutilized(cpu)))
return true;
if (energy_aware())
return rq->misfit_task_load > 0;
rcu_read_lock();
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds && !energy_aware()) {
/*
* XXX: write a coherent comment on why we do this.
* See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
*/
nr_busy = atomic_read(&sds->nr_busy_cpus);
if (nr_busy > 1) {
kick = true;
goto unlock;
}
}
sd = rcu_dereference(per_cpu(sd_asym, cpu));
if (sd) {
for_each_cpu(i, sched_domain_span(sd)) {
if (i == cpu ||
!cpumask_test_cpu(i, &cpumask))
continue;
if (sched_asym_prefer(i, cpu)) {
kick = true;
goto unlock;
}
}
}
unlock:
rcu_read_unlock();
return kick;
}
#else
static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
static inline bool nohz_kick_needed(struct rq *rq, bool only_update) { return false; }
#endif
/*
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
/*
* Since core isolation doesn't update nohz.idle_cpus_mask, there
* is a possibility this nohz kicked cpu could be isolated. Hence
* return if the cpu is isolated.
*/
if (cpu_isolated(this_rq->cpu))
return;
/*
* If this cpu has a pending nohz_balance_kick, then do the
* balancing on behalf of the other idle cpus whose ticks are
* stopped. Do nohz_idle_balance *before* rebalance_domains to
* give the idle cpus a chance to load balance. Else we may
* load balance only within the local sched_domain hierarchy
* and abort nohz_idle_balance altogether if we pull some load.
*/
nohz_idle_balance(this_rq, idle);
update_blocked_averages(this_rq->cpu);
#ifdef CONFIG_NO_HZ_COMMON
if (!test_bit(NOHZ_STATS_KICK, nohz_flags(this_rq->cpu)))
rebalance_domains(this_rq, idle);
clear_bit(NOHZ_STATS_KICK, nohz_flags(this_rq->cpu));
#else
rebalance_domains(this_rq, idle);
#endif
}
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*/
void trigger_load_balance(struct rq *rq)
{
/* Don't need to rebalance while attached to NULL domain or
* cpu is isolated.
*/
if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq)))
return;
if (time_after_eq(jiffies, rq->next_balance))
raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
if (nohz_kick_needed(rq, false))
nohz_balancer_kick(false);
#endif
}
static void rq_online_fair(struct rq *rq)
{
update_sysctl();
update_runtime_enabled(rq);
}
static void rq_offline_fair(struct rq *rq)
{
update_sysctl();
/* Ensure any throttled groups are reachable by pick_next_task */
unthrottle_offline_cfs_rqs(rq);
}
#endif /* CONFIG_SMP */
/*
* scheduler tick hitting a task of our scheduling class:
*/
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
#ifdef CONFIG_SCHED_WALT
bool old_misfit = curr->misfit;
bool misfit;
#endif
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
entity_tick(cfs_rq, se, queued);
}
if (IS_ENABLED(CONFIG_NUMA_BALANCING) &&
static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
update_misfit_status(curr, rq);
#ifdef CONFIG_SCHED_WALT
misfit = rq->misfit_task_load;
if (old_misfit != misfit) {
walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
curr->misfit = misfit;
}
#endif
update_overutilized_status(rq);
}
/*
* called on fork with the child task as argument from the parent's context
* - child not yet on the tasklist
* - preemption disabled
*/
static void task_fork_fair(struct task_struct *p)
{
struct sched_entity *se = &p->se, *curr;
struct cfs_rq *cfs_rq;
struct rq *rq = this_rq();
struct rq_flags rf;
rq_lock(rq, &rf);
update_rq_clock(rq);
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
/*
* Priority of the task has changed. Check to see if we preempt
* the current task.
*/
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
if (rq->curr == p) {
if (p->prio > oldprio)
resched_curr(rq);
} else
check_preempt_curr(rq, p, 0);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Propagate the changes of the sched_entity across the tg tree to make it
* visible to the root
*/
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq;
/* Start to propagate at parent */
se = se->parent;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
if (cfs_rq_throttled(cfs_rq))
break;
update_load_avg(cfs_rq, se, UPDATE_TG);
}
}
#else
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
#endif
static void detach_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
#ifdef CONFIG_SMP
/*
* In case the task sched_avg hasn't been attached:
* - A forked task which hasn't been woken up by wake_up_new_task().
* - A task which has been woken up by try_to_wake_up() but is
* waiting for actually being woken up by sched_ttwu_pending().
*/
if (!se->avg.last_update_time)
return;
#endif
/* Catch up with the cfs_rq and remove our load when we leave */
update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
}
static void attach_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Since the real-depth could have been changed (only the FAIR
 * class maintains the depth value), reset depth properly.
*/
se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif
/* Synchronize entity with its cfs_rq */
update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
}
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
detach_entity_cfs_rq(se);
}
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
attach_entity_cfs_rq(se);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
detach_task_cfs_rq(p);
}
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
attach_task_cfs_rq(p);
if (task_on_rq_queued(p)) {
/*
* We were most likely switched from sched_rt, so
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
if (rq->curr == p)
resched_curr(rq);
else
check_preempt_curr(rq, p, 0);
}
}
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
static void set_curr_task_fair(struct rq *rq)
{
struct sched_entity *se = &rq->curr->se;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
set_next_entity(cfs_rq, se);
/* ensure bandwidth has been allocated on our new cfs_rq */
account_cfs_rq_runtime(cfs_rq, 0);
}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
#ifdef CONFIG_SMP
raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_set_group_fair(struct task_struct *p)
{
struct sched_entity *se = &p->se;
set_task_rq(p, task_cpu(p));
se->depth = se->parent ? se->parent->depth + 1 : 0;
}
static void task_move_group_fair(struct task_struct *p)
{
detach_task_cfs_rq(p);
set_task_rq(p, task_cpu(p));
#ifdef CONFIG_SMP
/* Tell se's cfs_rq has been changed -- migrated */
p->se.avg.last_update_time = 0;
#endif
attach_task_cfs_rq(p);
}
static void task_change_group_fair(struct task_struct *p, int type)
{
switch (type) {
case TASK_SET_GROUP:
task_set_group_fair(p);
break;
case TASK_MOVE_GROUP:
task_move_group_fair(p);
break;
}
}
void free_fair_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
if (tg->se)
kfree(tg->se[i]);
}
kfree(tg->cfs_rq);
kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
int i;
tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
if (!tg->se)
goto err;
tg->shares = NICE_0_LOAD;
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
goto err;
se = kzalloc_node(sizeof(struct sched_entity),
GFP_KERNEL, cpu_to_node(i));
if (!se)
goto err_free_rq;
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
init_entity_runnable_average(se);
}
return 1;
err_free_rq:
kfree(cfs_rq);
err:
return 0;
}
void online_fair_sched_group(struct task_group *tg)
{
struct sched_entity *se;
struct rq_flags rf;
struct rq *rq;
int i;
for_each_possible_cpu(i) {
rq = cpu_rq(i);
se = tg->se[i];
rq_lock_irq(rq, &rf);
update_rq_clock(rq);
attach_entity_cfs_rq(se);
sync_throttle(tg, i);
rq_unlock_irq(rq, &rf);
}
}
void unregister_fair_sched_group(struct task_group *tg)
{
unsigned long flags;
struct rq *rq;
int cpu;
destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(cpu) {
if (tg->se[cpu])
remove_entity_load_avg(tg->se[cpu]);
/*
* Only empty task groups can be destroyed; so we can speculatively
* check on_list without danger of it being re-added.
*/
if (!tg->cfs_rq[cpu]->on_list)
continue;
rq = cpu_rq(cpu);
raw_spin_lock_irqsave(&rq->lock, flags);
list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
cfs_rq->tg = tg;
cfs_rq->rq = rq;
init_cfs_rq_runtime(cfs_rq);
tg->cfs_rq[cpu] = cfs_rq;
tg->se[cpu] = se;
/* se could be NULL for root_task_group */
if (!se)
return;
if (!parent) {
se->cfs_rq = &rq->cfs;
se->depth = 0;
} else {
se->cfs_rq = parent->my_q;
se->depth = parent->depth + 1;
}
se->my_q = cfs_rq;
/* guarantee group entities always have weight */
update_load_set(&se->load, NICE_0_LOAD);
se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
/*
* We can't change the weight of the root cgroup.
*/
if (!tg->se[0])
return -EINVAL;
shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se = tg->se[i];
struct rq_flags rf;
/* Propagate contribution to hierarchy */
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
update_cfs_group(se);
}
rq_unlock_irqrestore(rq, &rf);
}
done:
mutex_unlock(&shares_mutex);
return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */
void free_fair_sched_group(struct task_group *tg) { }
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
void online_fair_sched_group(struct task_group *tg) { }
void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
struct sched_entity *se = &task->se;
unsigned int rr_interval = 0;
/*
* Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
* idle runqueue:
*/
if (rq->cfs.load.weight)
rr_interval = NS_TO_JIFFIES(se->slice);
return rr_interval;
}
#ifdef CONFIG_SCHED_CASS
#include "cass.c"
/* Use CASS. A dummy wrapper ensures the replaced function is still "used". */
static inline void *select_task_rq_fair_dummy(void)
{
return (void *)select_task_rq_fair;
}
#define select_task_rq_fair cass_select_task_rq_fair
#endif /* CONFIG_SCHED_CASS */
/*
* All the scheduling class methods:
*/
const struct sched_class fair_sched_class = {
.next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
.yield_to_task = yield_to_task_fair,
.check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
.task_dead = task_dead_fair,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
.task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
.switched_from = switched_from_fair,
.switched_to = switched_to_fair,
.get_rr_interval = get_rr_interval_fair,
.update_curr = update_curr_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
.task_change_group = task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
#endif
#ifdef CONFIG_UCLAMP_TASK
.uclamp_enabled = 1,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq, *pos;
rcu_read_lock();
for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
print_cfs_rq(m, cpu, cfs_rq);
rcu_read_unlock();
}
#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
int node;
unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
for_each_online_node(node) {
if (p->numa_faults) {
tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
}
if (p->numa_group) {
gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
print_numa_stats(m, node, tsf, tpf, gsf, gpf);
}
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
#ifdef CONFIG_NO_HZ_COMMON
nohz.next_balance = jiffies;
nohz.next_update = jiffies;
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
alloc_eenv();
#endif /* SMP */
}
/* WALT sched implementation begins here */
#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_CFS_BANDWIDTH
static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
{
cfs_rq->walt_stats.nr_big_tasks = 0;
cfs_rq->walt_stats.cumulative_runnable_avg_scaled = 0;
cfs_rq->walt_stats.pred_demands_sum_scaled = 0;
}
static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
inc_nr_big_task(&cfs_rq->walt_stats, p);
fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
p->ravg.demand_scaled,
p->ravg.pred_demand_scaled);
}
static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
dec_nr_big_task(&cfs_rq->walt_stats, p);
fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
-(s64)p->ravg.demand_scaled,
-(s64)p->ravg.pred_demand_scaled);
}
static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
struct cfs_rq *tcfs_rq)
{
struct rq *rq = rq_of(tcfs_rq);
stats->nr_big_tasks += tcfs_rq->walt_stats.nr_big_tasks;
fixup_cumulative_runnable_avg(stats,
tcfs_rq->walt_stats.cumulative_runnable_avg_scaled,
tcfs_rq->walt_stats.pred_demands_sum_scaled);
if (stats == &rq->walt_stats)
walt_fixup_cum_window_demand(rq,
tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
}
static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
struct cfs_rq *tcfs_rq)
{
struct rq *rq = rq_of(tcfs_rq);
stats->nr_big_tasks -= tcfs_rq->walt_stats.nr_big_tasks;
fixup_cumulative_runnable_avg(stats,
-tcfs_rq->walt_stats.cumulative_runnable_avg_scaled,
-tcfs_rq->walt_stats.pred_demands_sum_scaled);
/*
 * We remove the throttled cfs_rq's tasks' contribution from the
* cumulative window demand so that the same can be added
* unconditionally when the cfs_rq is unthrottled.
*/
if (stats == &rq->walt_stats)
walt_fixup_cum_window_demand(rq,
-tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
}
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled,
u16 updated_pred_demand_scaled)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
s64 task_load_delta = (s64)updated_demand_scaled -
p->ravg.demand_scaled;
s64 pred_demand_delta = (s64)updated_pred_demand_scaled -
p->ravg.pred_demand_scaled;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
task_load_delta,
pred_demand_delta);
if (cfs_rq_throttled(cfs_rq))
break;
}
/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
if (!se) {
fixup_cumulative_runnable_avg(&rq->walt_stats,
task_load_delta,
pred_demand_delta);
walt_fixup_cum_window_demand(rq, task_load_delta);
}
}
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
int delta, bool inc)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
BUG_ON(cfs_rq->walt_stats.nr_big_tasks < 0);
if (cfs_rq_throttled(cfs_rq))
break;
}
/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
if (!se)
walt_adjust_nr_big_tasks(rq, delta, inc);
}
/*
* Check if task is part of a hierarchy where some cfs_rq does not have any
* runtime left.
*
* We can't rely on throttled_hierarchy() to do this test, as
* cfs_rq->throttle_count will not be updated yet when this function is called
* from scheduler_tick()
*/
static int task_will_be_throttled(struct task_struct *p)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq;
if (!cfs_bandwidth_used())
return 0;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
if (!cfs_rq->runtime_enabled)
continue;
if (cfs_rq->runtime_remaining <= 0)
return 1;
}
return 0;
}
#else /* CONFIG_CFS_BANDWIDTH */
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled,
u16 updated_pred_demand_scaled)
{
fixup_walt_sched_stats_common(rq, p, updated_demand_scaled,
updated_pred_demand_scaled);
}
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
int delta, bool inc)
{
walt_adjust_nr_big_tasks(rq, delta, inc);
}
static int task_will_be_throttled(struct task_struct *p)
{
return false;
}
#endif /* CONFIG_CFS_BANDWIDTH */
static inline int
kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
{
unsigned long flags;
int rc = 0;
/* Invoke active balance to force migrate currently running task */
raw_spin_lock_irqsave(&rq->lock, flags);
if (!rq->active_balance) {
rq->active_balance = 1;
rq->push_cpu = new_cpu;
get_task_struct(p);
rq->push_task = p;
rc = 1;
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
return rc;
}
#ifdef CONFIG_SCHED_WALT
struct walt_rotate_work {
struct work_struct w;
struct task_struct *src_task;
struct task_struct *dst_task;
int src_cpu;
int dst_cpu;
};
static DEFINE_PER_CPU(struct walt_rotate_work, walt_rotate_works);
static void walt_rotate_work_func(struct work_struct *work)
{
struct walt_rotate_work *wr = container_of(work,
struct walt_rotate_work, w);
migrate_swap(wr->src_task, wr->dst_task);
put_task_struct(wr->src_task);
put_task_struct(wr->dst_task);
clear_reserved(wr->src_cpu);
clear_reserved(wr->dst_cpu);
}
void walt_rotate_work_init(void)
{
int i;
for_each_possible_cpu(i) {
struct walt_rotate_work *wr = &per_cpu(walt_rotate_works, i);
INIT_WORK(&wr->w, walt_rotate_work_func);
}
}
#define WALT_ROTATION_THRESHOLD_NS 16000000
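/*
 * 16000000 ns = 16 ms. In walt_check_for_rotation() below, the running
 * task on a higher-capacity cpu is only considered as a rotation swap
 * target once it has run for at least this long since it was last
 * enqueued.
 */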
static void walt_check_for_rotation(struct rq *src_rq)
{
u64 wc, wait, max_wait = 0, run, max_run = 0;
int deserved_cpu = nr_cpu_ids, dst_cpu = nr_cpu_ids;
int i, src_cpu = cpu_of(src_rq);
struct rq *dst_rq;
struct walt_rotate_work *wr = NULL;
if (!walt_rotation_enabled)
return;
if (!is_min_capacity_cpu(src_cpu))
return;
wc = sched_ktime_clock();
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
if (!is_min_capacity_cpu(i))
break;
if (is_reserved(i))
continue;
if (!rq->misfit_task_load || rq->curr->sched_class !=
&fair_sched_class)
continue;
wait = wc - rq->curr->last_enqueued_ts;
if (wait > max_wait) {
max_wait = wait;
deserved_cpu = i;
}
}
if (deserved_cpu != src_cpu)
return;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
if (is_min_capacity_cpu(i))
continue;
if (is_reserved(i))
continue;
if (rq->curr->sched_class != &fair_sched_class)
continue;
if (rq->nr_running > 1)
continue;
run = wc - rq->curr->last_enqueued_ts;
if (run < WALT_ROTATION_THRESHOLD_NS)
continue;
if (run > max_run) {
max_run = run;
dst_cpu = i;
}
}
if (dst_cpu == nr_cpu_ids)
return;
dst_rq = cpu_rq(dst_cpu);
double_rq_lock(src_rq, dst_rq);
if (dst_rq->curr->sched_class == &fair_sched_class) {
get_task_struct(src_rq->curr);
get_task_struct(dst_rq->curr);
mark_reserved(src_cpu);
mark_reserved(dst_cpu);
wr = &per_cpu(walt_rotate_works, src_cpu);
wr->src_task = src_rq->curr;
wr->dst_task = dst_rq->curr;
wr->src_cpu = src_cpu;
wr->dst_cpu = dst_cpu;
}
double_rq_unlock(src_rq, dst_rq);
if (wr)
queue_work_on(src_cpu, system_highpri_wq, &wr->w);
}
#else
static inline void walt_check_for_rotation(struct rq *rq)
{
}
#endif
static DEFINE_RAW_SPINLOCK(migration_lock);
void check_for_migration(struct rq *rq, struct task_struct *p)
{
int active_balance;
int new_cpu = -1;
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd = NULL;
if (IS_ENABLED(CONFIG_SCHED_CASS))
return;
if (rq->misfit_task_load) {
if (rq->curr->state != TASK_RUNNING ||
rq->curr->nr_cpus_allowed == 1)
return;
if (task_will_be_throttled(p))
return;
raw_spin_lock(&migration_lock);
rcu_read_lock();
new_cpu = find_energy_efficient_cpu(sd, p, cpu, prev_cpu,
0, false);
rcu_read_unlock();
if ((new_cpu != -1) &&
(capacity_orig_of(new_cpu) > capacity_orig_of(cpu))) {
active_balance = kick_active_balance(rq, p, new_cpu);
if (active_balance) {
mark_reserved(new_cpu);
raw_spin_unlock(&migration_lock);
stop_one_cpu_nowait(cpu,
active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
return;
}
} else {
walt_check_for_rotation(rq);
}
raw_spin_unlock(&migration_lock);
}
}
#endif /* CONFIG_SCHED_WALT */