Files
kernel_google_wahoo/include/linux/vmstat.h
Andres Oportus 82a585bafa Sync scheduler to branch android-4.4 in common/kernel
With these changes kernel/sched directories of branches
android-msm-8998-4.4-common (under
partner-android/kernel/private/msm-qcom) and android-4.4 (under
partner-android/kernel/common) match

Bug: 32492390
Test: MTP boots to UI and successfully runs Vellamo and Benchmark tests

defconfig: re-add interactive governor
qcom-cpufreq: remove calls to trace cpu frequency switch
Revert "trace: cpu_freq_switch: Add profiler for CPU frequency switch times"
dd2460f DEBUG: sched,cpufreq: add cpu_capacity change tracepoint
7bb5218 cgroup: Remove leftover instances of allow_attach
e78f134 CHROMIUM: remove Android's cgroup generic permissions checks
53c63d3 sched: Add Kconfig option DEFAULT_USE_ENERGY_AWARE to set ENERGY_AWARE feature flag
df23243 sched/fair: remove printk while schedule is in progress
cf43809 sched/walt: Drop arch-specific timer access
5449876 eas/sched/fair: Fixing comments in find_best_target.
a85045c FIXUP: sched/tune: update accouting before CPU capacity
fb3cce0 FIXUP: sched/tune: add fixes missing from a previous patch
aeb4a31 sched/walt: use do_div instead of division operator
6c7e03d DEBUG: cpufreq: fix cpu_capacity tracing build for non-smp systems
1c81f45 sched/walt: include missing header for arm_timer_read_counter()
8133095 cpufreq: Kconfig: Fixup incorrect selection by CPU_FREQ_DEFAULT_GOV_SCHED
7655461 sched/fair: Avoid redundant idle_cpu() call in update_sg_lb_stats()
34828bd FIXUP: sched: scheduler-driven cpu frequency selection
2a44453 sched/rt: Add Kconfig option to enable panicking for RT throttling
989f33f sched/rt: print RT tasks when RT throttling is activated
3276c3d UPSTREAM: sched: Fix a race between __kthread_bind() and sched_setaffinity()
07ec7db sched/fair: Favor higher cpus only for boosted tasks
142b2ac vmstat: make vmstat_updater deferrable again and shut down on idle
74b4fa8 sched/fair: call OPP update when going idle after migration
bf93a36 sched/cpufreq_sched: fix thermal capping events
c80a9af sched/fair: Picking cpus with low OPPs for tasks that prefer idle CPUs
b9534b8 FIXUP: sched/tune: do initialization as a postcore_initicall
93db70f DEBUG: sched: add tracepoint for RD overutilized
c5a00c2 sched/tune: Introducing a new schedtune attribute prefer_idle
d4cda03 sched: use util instead of capacity to select busy cpu
23ed57d arch_timer: add error handling when the MPM global timer is cleared
8935b6b FIXUP: sched: Fix double-release of spinlock in move_queued_task
740d312 FIXUP: sched/fair: Fix hang during suspend in sched_group_energy
5156b67 FIXUP: sched: fix SchedFreq integration for both PELT and WALT
782c9d6 sched: EAS: Avoid causing spikes to max-freq unnecessarily
dfc1151 FIXUP: sched: fix set_cfs_cpu_capacity when WALT is in use
519c627 sched/walt: Accounting for number of irqs pending on each core
efb86bd sched: Introduce Window Assisted Load Tracking (WALT)
345be81 sched/tune: fix PB and PC cuts indexes definition
369bcbb sched/fair: optimize idle cpu selection for boosted tasks
af14760 FIXUP: sched/tune: fix accounting for runnable tasks
7f8f24a sched/tune: use a single initialisation function
274bbcf sched/{fair,tune}: simplify fair.c code
254f509 FIXUP: sched/tune: fix payoff calculation for boost region
00aae8d sched/tune: Add support for negative boost values
6ba071d FIX: sched/tune: move schedtune_nornalize_energy into fair.c
28e8cb9 FIX: sched/tune: update usage of boosted task utilisation on CPU selection
c50cc22 sched/fair: add tunable to set initial task load
4a5e890 sched/fair: add tunable to force selection at cpu granularity
2e9abbc sched: EAS: take cstate into account when selecting idle core
d753e92 sched/cpufreq_sched: Consolidated update
765c2ab FIXUP: sched: fix build for non-SMP target
ae54c77 DEBUG: sched/tune: add tracepoint on P-E space filtering
4525aa3 DEBUG: sched/tune: add tracepoint for energy_diff() values
962b7c1 DEBUG: sched/tune: add tracepoint for task boost signal
3a400ab CHROMIUM: sched: update the average of nr_running
85a09f2 UPSTREAM: sched: panic on corrupted stack end
287d9d0 cpufreq: interactive: drop cpufreq_{get,put}_global_kobject func calls
d495067 Revert "cpufreq: interactive: build fixes for 4.4"
fed95d9 DEBUG: schedtune: add tracepoint for schedtune_tasks_update() values
a09a25c DEBUG: schedtune: add tracepoint for CPU boost signal
c8a65d2 DEBUG: schedtune: add tracepoint for SchedTune configuration update
0a0f4aa DEBUG: sched: add energy procfs interface
e5a2599 DEBUG: sched: add tracepoint for CPU load/util signals
8017fd7 DEBUG: sched: add tracepoint for task load/util signals
99ed4e5 DEBUG: sched: add tracepoint for cpu/freq scale invariance
a2a6dc7 sched/fair: filter energy_diff() based on energy_payoff value
637ee37 sched/tune: add support to compute normalized energy
36967b2 sched/fair: keep track of energy/capacity variations
a8f6558 sched/fair: add boosted task utilization
a515b88 sched/{fair,tune}: track RUNNABLE tasks impact on per CPU boost value
9cd53fb sched/tune: compute and keep track of per CPU boost value
13001f4 sched/tune: add initial support for CGroups based boosting
07e2294 sched/fair: add boosted CPU usage
6ed2714 sched/fair: add function to convert boost value into "margin"
344f4ec sched/tune: add sysctl interface to define a boost value
08d1cfd fixup! sched/fair: jump to max OPP when crossing UP threshold
3eb2910 fixup! sched: scheduler-driven cpu frequency selection
6e1e1ed sched: rt scheduler sets capacity requirement
fab5cc5 sched: deadline: use deadline bandwidth in scale_rt_capacity
cd248fa sched: remove call of sched_avg_update from sched_rt_avg_update
9d44dc7 sched/cpufreq_sched: add trace events
6b6c192 sched/fair: jump to max OPP when crossing UP threshold
f99e3fe sched/fair: cpufreq_sched triggers for load balancing
7ff814dd sched/{core,fair}: trigger OPP change request on fork()
ea429cc sched/fair: add triggers for OPP change requests
a967a45 sched: scheduler-driven cpu frequency selection
c3b2e76 cpufreq: introduce cpufreq_driver_is_slow
0d2b1cd sched: Consider misfit tasks when load-balancing
f2a8923 sched: Add group_misfit_task load-balance type
563ddb6 sched: Add per-cpu max capacity to sched_group_capacity
e14f151 sched: Do eas idle balance regardless of the rq avg idle value
05a773b sched: Update max cpu capacity in case of max frequency constraints
b03f1ba cpufreq: Max freq invariant scheduler load-tracking and cpu capacity support
d2b3db0 sched: Support for extracting EAS energy costs from DT
b71188b sched: Disable energy-unfriendly nohz kicks
52b7b8a sched: Consider a not over-utilized energy-aware system as balanced
e38982b sched: Energy-aware wake-up task placement
ec055b9 sched: Determine the current sched_group idle-state
19a5ebe sched, cpuidle: Track cpuidle state index in the scheduler
1b5ec5d sched: Add over-utilization/tipping point indicator
2c6a8a4 sched: Estimate energy impact of scheduling decisions
df2030c sched: Extend sched_group_energy to test load-balancing decisions
c1770a5 sched: Calculate energy consumption of sched_group
5ec8cca sched: Highest energy aware balancing sched_domain level pointer
b6c0399 sched: Relocated cpu_util() and change return type
3e55d2f sched: Compute cpu capacity available at current frequency
f0f739d sched: Introduce SD_SHARE_CAP_STATES sched_domain flag
4ce990e sched: Initialize energy data structures
0b3bda5 sched: Introduce energy data structures
e496f32 sched: Make energy awareness a sched feature
681fa14 sched: Prevent unnecessary active balance of single task in sched group
cda2bd3 sched: Enable idle balance to pull single task towards cpu with higher capacity
8a5c033 sched: Consider spare cpu capacity at task wake-up
e8bcb272 sched: Add cpu capacity awareness to wakeup balancing
1eb2b8a sched: Store system-wide maximum cpu capacity in root domain
259bd4c cpufreq: Frequency invariant scheduler load-tracking support
440a577 cpufreq: interactive: only apply interactive boost when enabled
2ebb4d9 cpufreq: interactive: fix policy locking
31a3049 android: skip building drivers as modules
0d6687a cgroup: Fix issues in allow_attach callback
2665ab9 trace: cpufreq: Add tracing for min/max cpufreq
4808d28 subsystem: CPU FREQUENCY DRIVERS- Set cpu_load calculation on current frequency
bc68f6c cpufreq: interactive: build fixes for 4.4
e197a21 cpufreq: interactive: replace strict_strtoul() with kstrtoul()
b1b0fd3 cpufreq: interactive: Rearm governor timer at max freq
0ad0834 cpufreq: interactive: Implement cluster-based min_sample_time
54c3ec0 cpufreq: interactive: Exercise hispeed settings at a policy level
4db1f0a cpufreq: interactive: Round up timer_rate to match jiffy
16e989d cpufreq: interactive: Don't set floor_validate_time during boost
10fe4289 cpufreq: interactive: Put global cpufreq kobject on failure
1e04bc2 cpufreq: interactive: only boost tunable affected cpus
0b4d5f5 cpufreq: interactive: don't skip waking up speedchange_task if target_freq > policy->cur
39b5c1b cpufreq: interactive: make common_tunables static
bc88e63 cpufreq: interactive: prevents the frequency to directly raise above the hispeed_freq from a lower frequency.
61a37b3 cpufreq: interactive: remove compilation error from commit 49cc72365fb7ee87762a7ccc6a32ef68627216c5
6c2c93b cpufreq: interactive: turn boost_pulse off on boost off
528ef7d cpufreq: interactive: restructure CPUFREQ_GOV_LIMITS
479449e cpufreq: interactive: hold reference on global cpufreq kobject if needed
0be8516 cpufreq: interactive: Use generic get_cpu_idle_time() from cpufreq.c
8385286 cpufreq: interactive: fix NULL pointer dereference at sysfs ops
ef37f1f cpufreq: interactive: fix compiling warnings
8fbe05e cpufreq: interactive: delete timers for GOV_START
5690bb1 cpufreq: Interactive: Implement per policy instances of governor
4d30036 cpufreq: interactive: Move definition of cpufreq_gov_interactive downwards
8db7e96 cpufreq: interactive: Remove unnecessary cpu_online() check
6728dfa cpufreq: interactive: fix show_target_loads and show_above_hispeed_delay
e8ad1a85 cpufreq: interactive: resched timer if max freq raised
7a5c8ba cpufreq: interactive: fix race on cpufreq TRANSITION notifier
83720c3 cpufreq: interactive: avoid underflow on active time calculation
aedd63a cpufreq: interactive: reduce chance of zero time delta on load eval
f71b480 cpufreq: interactive: handle errors from cpufreq_frequency_table_target
f587d09 cpufreq: interactive: fix uninitialized spinlock
75f9b06 cpufreq: interactive: base above_hispeed_delay on target freq, not current
cc80bd4 cpufreq: interactive: fix crash on error paths in get_tokenized_data
6b2fd6c cpufreq: interactive: add io_is_busy interface
c9d7bc6 cpufreq: interactive: allow arbitrary speed / delay mappings
939e7f1 cpufreq: interactive: fix race on governor start/stop
c7ad1e1 cpufreq: interactive: fix deadlock on spinlock in timer
06371b5 cpufreq: interactive: don't handle transition notification if not enabled
c5ec6c6 cpufreq: interactive: init default values at compile time
b4f2820 cpufreq: interactive: default go_hispeed_load 99%, doc updates
1dc7486 cpufreq: interactive: fix race on timer restart on governor start
583695f cpufreq: interactive: fix racy timer stopping
74f0d69 cpufreq: interactive: fix boosting logic
959433f cpufreq: interactive: add timer slack to limit idle at speed > min
a6d6051 cpufreq: interactive: specify duration of CPU speed boost pulse
2c2b492 cpufreq: interactive: adjust load for changes in speed
482f37e cpufreq: interactive: remove load since last speed change
fbc1d52 cpufreq: interactive: allow arbitrary speed / target load mappings
e337153 cpufreq: interactive: apply above_hispeed_delay to each step above hispeed
4727e1a cpufreq: interactive: change speed according to current speed and target load
dc202c3 cpufreq: interactive: trace actual speed in target speed decisions
2aadfa7 cpufreq: interactive: kick timer on idle exit past expiry
f5b4e66 cpufreq: interactive: use deferrable timer by default
916d056 cpufreq: interactive: pin timers to associated CPU
53d77c9 cpufreq: interactive: run at fraction of hispeed_freq when load is low
984e8bb cpufreq: interactive: always limit initial speed bump to hispeed
f9e8727 cpufreq: interactive: remove input_boost handling
4429f8b cpufreq: interactive: handle speed up and down in the realtime task
d00caa6 cpufreq: interactive: keep freezer happy when not current governor
5722666 cpufreq: interactive: take idle notifications only when active
122c60b cpufreq: interactive: restart above_hispeed_delay at each hispeed load
b62e5a1 cpufreq-interactive: Compile fixup
c4241c9 cpufreq: interactive: add boost pulse interface
b486bd1 cpufreq: interactive: set floor for boosted speed
1ea7b77 cpufreq: interactive: Add sysfs boost interface for hints from userspace
910dea0 cpufreq: interactive: remove unused target_validate_time_in_idle
c48fcaa cpufreq: interactive: Boost frequency on touchscreen input
1c31ed4 cpufreq: interactive: Separate speed target revalidate time and initial set time
759fcdd cpufreq: interactive: base hispeed bump on target freq, not actual
3b14df5 cpufreq: interactive: adjust code and documentation to match
4ca4034 cpufreq: interactive: configurable delay before raising above hispeed
27c22a7 cpufreq: interactive: don't drop speed if recently at higher load
7b2dc7e cpufreq: interactive: set at least hispeed when above hispeed load
077dfe8 cpufreq: interactive: apply intermediate load to max speed not current
2561571 cpufreq interactive governor: event tracing
008bd61 cpufreq: interactive: New 'interactive' governor
7abc800 sched: add sched blocked tracepoint which dumps out context of sleep.
b3bb0e5 sched: Enable might_sleep before initializing drivers.

Signed-off-by: Andres Oportus <andresoportus@google.com>
Change-Id: Ibbeaddb04b44dba77aaead172d07d5bb29e61a3e
2016-10-27 21:53:59 -07:00

290 lines
7.5 KiB
C

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
extern int sysctl_stat_interval;
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
* Light weight per cpu counter implementation.
*
* Counters should only be incremented and no critical kernel component
* should rely on the counter values.
*
* Counters are handled completely inline. On many platforms the code
* generated will simply be the increment of a global address.
*/
/*
 * Per-cpu snapshot of every VM event counter, one slot per
 * vm_event_item.  Updated locklessly by the count_vm_event*()
 * helpers below.
 */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
/*
* vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
* local_irq_disable overhead.
*/
/*
 * Increment one event counter on the local cpu.  Uses raw_cpu_inc()
 * to skip the local_irq_disable overhead; per the comment above, these
 * counters are explicitly allowed to be racy.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}
/*
 * Increment one event counter on the local cpu; this_cpu_inc() handles
 * the cpu-local access safely itself.
 */
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}
/* Add @delta to one event counter; raw (racy-tolerant) variant. */
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}
/* Add @delta to one event counter via the this_cpu_*() accessors. */
static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);
#else
/* Disable counters */
/*
 * CONFIG_VM_EVENT_COUNTERS=n: every helper compiles away to an empty
 * static inline, so call sites need no #ifdef guards of their own.
 */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
 * NUMA-balancing, TLB-flush and vmacache events are only counted when
 * the corresponding config option is enabled.  The disabled forms that
 * take a delta still evaluate it via (void)(y) so any side effects in
 * the argument expression are preserved.
 */
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif
/*
 * Count an event for a specific zone: the per-zone item is derived
 * from the _NORMAL-suffixed base item offset by the zone's index.
 */
#define __count_zone_vm_events(item, zone, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
			  zone_idx(zone), delta)
/*
* Zone based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
/*
 * Apply delta @x to both the per-zone and the global atomic counter
 * for @item.
 */
static inline void zone_page_state_add(long x, struct zone *zone,
				       enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
/*
 * Read the global counter for @item.  On SMP, per-cpu differentials
 * that have not yet been folded in can drive the stored sum
 * transiently negative; clamp to zero so we never return a huge
 * unsigned value.
 */
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long value = atomic_long_read(&vm_stat[item]);

#ifdef CONFIG_SMP
	value = value < 0 ? 0 : value;
#endif
	return value;
}
/*
 * Read @zone's counter for @item.  As with global_page_state(),
 * pending per-cpu deltas can make the stored value negative on SMP;
 * never report a negative count.
 */
static inline unsigned long zone_page_state(struct zone *zone,
					    enum zone_stat_item item)
{
	long value = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	value = value < 0 ? 0 : value;
#endif
	return value;
}
/*
* More accurate version that also considers the currently pending
* deltas. For that we need to loop over all cpus to find the current
* deltas. There is no synchronization so the result cannot be
* exactly accurate either.
*/
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
						     enum zone_stat_item item)
{
	long sum = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	/* Fold in each online cpu's not-yet-committed differential. */
	for_each_online_cpu(cpu)
		sum += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	/* Unsynchronized accumulation may still go negative; clamp. */
	if (sum < 0)
		sum = 0;
#endif
	return sum;
}
/*
 * Global counterpart of zone_page_state_snapshot(): the atomic total
 * plus every online cpu's pending differential across all populated
 * zones.  No synchronization, so only approximately accurate.
 */
static inline unsigned long global_page_state_snapshot(enum zone_stat_item item)
{
	long sum = atomic_long_read(&vm_stat[item]);

#ifdef CONFIG_SMP
	struct zone *zone;
	int cpu;

	for_each_online_cpu(cpu)
		for_each_populated_zone(zone)
			sum += per_cpu_ptr(zone->pageset,
					   cpu)->vm_stat_diff[item];

	if (sum < 0)
		sum = 0;
#endif
	return sum;
}
#ifdef CONFIG_NUMA
extern unsigned long node_page_state(int node, enum zone_stat_item item);
/* NOTE(review): presumably updates NUMA locality counters for an
 * allocation — confirm against the definition in mm/vmstat.c. */
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#else
/* !NUMA: a single node, so the node view equals the global view. */
#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)
#endif /* CONFIG_NUMA */

/* Convenience wrappers around mod_zone_page_state(). */
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#ifdef CONFIG_SMP
/*
 * SMP: counters are maintained with per-cpu differentials that are
 * periodically folded into the atomic totals (see mm/vmstat.c).
 * NOTE(review): by kernel convention the __-prefixed variants are the
 * forms that require the caller to deal with preemption/irqs — confirm
 * against the out-of-line definitions.
 */
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

/* Per-zone thresholds controlling when differentials are folded. */
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */
/*
* We do not maintain differentials in a single processor configuration.
* The functions directly modify the zone and global counters.
*/
/* UP: no differentials — modify zone and global counters directly. */
static inline void __mod_zone_page_state(struct zone *zone,
					 enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}
/* UP: increment both the per-zone and the global counter for @item. */
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}
/* UP: decrement both the per-zone and the global counter for @item. */
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}
/* Increment @item for the zone that @page belongs to. */
static inline void __inc_zone_page_state(struct page *page,
					 enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
/* Decrement @item for the zone that @page belongs to. */
static inline void __dec_zone_page_state(struct page *page,
					 enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
/*
* We only use atomic operations to update counters. So there is no need to
* disable interrupts.
*/
/*
 * UP: the irq-safe wrappers alias the __ forms (the atomic ops above
 * already suffice), and the per-cpu maintenance hooks become no-ops.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
				  struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */
/*
 * Adjust the free-page count of @zone by @nr_pages.  CMA pageblocks
 * are additionally tracked under NR_FREE_CMA_PAGES.
 */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

	if (!is_migrate_cma(migratetype))
		return;

	__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
extern const char * const vmstat_text[];
#endif /* _LINUX_VMSTAT_H */